2024-11-14 03:54:04,918 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-14 03:54:04,937 main DEBUG Took 0.016129 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-14 03:54:04,937 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-14 03:54:04,938 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-14 03:54:04,939 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-14 03:54:04,940 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,950 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-14 03:54:04,968 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,970 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,970 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,971 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,972 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,972 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,973 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,974 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,974 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,975 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,976 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,976 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,977 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,977 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-14 03:54:04,978 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,978 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,979 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,979 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,980 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,980 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,981 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,981 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,982 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,983 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 03:54:04,983 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,983 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-14 03:54:04,985 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 03:54:04,989 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-14 03:54:04,991 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-14 03:54:04,992 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-14 03:54:04,994 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-14 03:54:04,994 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-14 03:54:05,006 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-14 03:54:05,010 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-14 03:54:05,013 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-14 03:54:05,013 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-14 03:54:05,014 main DEBUG createAppenders(={Console}) 2024-11-14 03:54:05,015 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-14 03:54:05,015 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-14 03:54:05,015 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-14 03:54:05,016 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-14 03:54:05,016 main DEBUG OutputStream closed 2024-11-14 03:54:05,017 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-14 03:54:05,017 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-14 03:54:05,017 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-14 03:54:05,095 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-14 03:54:05,097 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-14 03:54:05,098 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-14 03:54:05,099 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-14 03:54:05,100 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-14 03:54:05,100 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-14 03:54:05,100 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-14 03:54:05,101 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-14 03:54:05,101 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-14 03:54:05,101 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-14 03:54:05,101 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-14 03:54:05,102 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-14 03:54:05,102 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-14 03:54:05,102 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-14 03:54:05,103 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-14 03:54:05,103 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-14 03:54:05,103 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-14 03:54:05,104 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-14 03:54:05,106 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-14 03:54:05,107 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-14 03:54:05,107 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-14 03:54:05,108 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-14T03:54:05,436 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a 2024-11-14 03:54:05,440 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-14 03:54:05,440 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-14T03:54:05,454 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-14T03:54:05,494 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=434, ProcessCount=11, AvailableMemoryMB=6757 2024-11-14T03:54:05,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T03:54:05,522 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e, deleteOnExit=true 2024-11-14T03:54:05,523 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T03:54:05,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/test.cache.data in system properties and HBase conf 2024-11-14T03:54:05,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T03:54:05,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.log.dir in system properties and HBase conf 2024-11-14T03:54:05,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T03:54:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T03:54:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T03:54:05,656 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-14T03:54:05,780 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T03:54:05,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:54:05,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:54:05,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T03:54:05,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:54:05,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T03:54:05,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T03:54:05,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:54:05,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:54:05,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T03:54:05,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/nfs.dump.dir in system properties and HBase conf 2024-11-14T03:54:05,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/java.io.tmpdir in system properties and HBase conf 2024-11-14T03:54:05,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:54:05,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T03:54:05,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T03:54:06,457 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:54:06,851 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-14T03:54:06,943 INFO [Time-limited test {}] log.Log(170): Logging initialized @2954ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-14T03:54:07,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:54:07,099 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:54:07,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:54:07,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:54:07,120 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:54:07,133 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:54:07,136 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:54:07,137 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:54:07,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/java.io.tmpdir/jetty-localhost-42023-hadoop-hdfs-3_4_1-tests_jar-_-any-11553092733749538608/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:54:07,380 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:42023} 2024-11-14T03:54:07,380 INFO [Time-limited test {}] server.Server(415): Started @3392ms 2024-11-14T03:54:07,412 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:54:07,826 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:54:07,835 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:54:07,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:54:07,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:54:07,838 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:54:07,839 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:54:07,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:54:07,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c2fdbac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/java.io.tmpdir/jetty-localhost-32837-hadoop-hdfs-3_4_1-tests_jar-_-any-17555657025966110374/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:54:07,990 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:32837} 2024-11-14T03:54:07,990 INFO [Time-limited test {}] server.Server(415): Started @4002ms 2024-11-14T03:54:08,056 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:54:08,219 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:54:08,227 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:54:08,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:54:08,230 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:54:08,230 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:54:08,233 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:54:08,234 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:54:08,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1467625d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/java.io.tmpdir/jetty-localhost-33675-hadoop-hdfs-3_4_1-tests_jar-_-any-8610375620126482002/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:54:08,398 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:33675} 2024-11-14T03:54:08,398 INFO [Time-limited test {}] server.Server(415): Started @4411ms 2024-11-14T03:54:08,401 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T03:54:08,596 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/data/data1/current/BP-251123023-172.17.0.2-1731556446590/current, will proceed with Du for space computation calculation, 2024-11-14T03:54:08,599 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/data/data3/current/BP-251123023-172.17.0.2-1731556446590/current, will proceed with Du for space computation calculation, 2024-11-14T03:54:08,600 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/data/data2/current/BP-251123023-172.17.0.2-1731556446590/current, will proceed with Du for space computation calculation, 2024-11-14T03:54:08,600 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/data/data4/current/BP-251123023-172.17.0.2-1731556446590/current, will proceed with Du for space computation calculation, 2024-11-14T03:54:08,683 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T03:54:08,684 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:54:08,771 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f65e2fb365decac with lease ID 0x895e704f219c4f22: Processing first storage report for DS-7cbac17e-95bb-407a-b30c-6af745554908 from datanode DatanodeRegistration(127.0.0.1:34925, datanodeUuid=8a269981-82b0-44e3-92f2-8a000f970dc0, infoPort=38529, infoSecurePort=0, ipcPort=43205, storageInfo=lv=-57;cid=testClusterID;nsid=1262914110;c=1731556446590) 2024-11-14T03:54:08,772 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f65e2fb365decac with lease ID 0x895e704f219c4f22: from storage DS-7cbac17e-95bb-407a-b30c-6af745554908 node DatanodeRegistration(127.0.0.1:34925, datanodeUuid=8a269981-82b0-44e3-92f2-8a000f970dc0, infoPort=38529, infoSecurePort=0, ipcPort=43205, storageInfo=lv=-57;cid=testClusterID;nsid=1262914110;c=1731556446590), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-14T03:54:08,773 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9bcde9fc0e7bc458 with lease ID 0x895e704f219c4f21: Processing first storage report for DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e from datanode DatanodeRegistration(127.0.0.1:34101, datanodeUuid=730c4d3f-4b73-4006-abf0-3d6ba2d70bf3, infoPort=45605, infoSecurePort=0, ipcPort=43597, storageInfo=lv=-57;cid=testClusterID;nsid=1262914110;c=1731556446590) 2024-11-14T03:54:08,773 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9bcde9fc0e7bc458 with lease ID 0x895e704f219c4f21: from storage DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e node DatanodeRegistration(127.0.0.1:34101, datanodeUuid=730c4d3f-4b73-4006-abf0-3d6ba2d70bf3, infoPort=45605, infoSecurePort=0, ipcPort=43597, storageInfo=lv=-57;cid=testClusterID;nsid=1262914110;c=1731556446590), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T03:54:08,773 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f65e2fb365decac with lease ID 0x895e704f219c4f22: Processing first storage report for DS-b24d1cca-8cf1-43e1-8cf2-cdca8372489d from datanode DatanodeRegistration(127.0.0.1:34925, datanodeUuid=8a269981-82b0-44e3-92f2-8a000f970dc0, infoPort=38529, infoSecurePort=0, ipcPort=43205, storageInfo=lv=-57;cid=testClusterID;nsid=1262914110;c=1731556446590) 2024-11-14T03:54:08,774 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f65e2fb365decac with lease ID 0x895e704f219c4f22: from storage DS-b24d1cca-8cf1-43e1-8cf2-cdca8372489d node DatanodeRegistration(127.0.0.1:34925, datanodeUuid=8a269981-82b0-44e3-92f2-8a000f970dc0, infoPort=38529, infoSecurePort=0, ipcPort=43205, storageInfo=lv=-57;cid=testClusterID;nsid=1262914110;c=1731556446590), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:54:08,787 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9bcde9fc0e7bc458 with lease ID 0x895e704f219c4f21: Processing first storage report for DS-1476ccce-c546-4f22-a309-bd7702b8e7d9 from datanode DatanodeRegistration(127.0.0.1:34101, datanodeUuid=730c4d3f-4b73-4006-abf0-3d6ba2d70bf3, infoPort=45605, infoSecurePort=0, ipcPort=43597, storageInfo=lv=-57;cid=testClusterID;nsid=1262914110;c=1731556446590) 2024-11-14T03:54:08,787 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x9bcde9fc0e7bc458 with lease ID 0x895e704f219c4f21: from storage DS-1476ccce-c546-4f22-a309-bd7702b8e7d9 node DatanodeRegistration(127.0.0.1:34101, datanodeUuid=730c4d3f-4b73-4006-abf0-3d6ba2d70bf3, infoPort=45605, infoSecurePort=0, ipcPort=43597, storageInfo=lv=-57;cid=testClusterID;nsid=1262914110;c=1731556446590), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T03:54:08,957 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a 2024-11-14T03:54:09,102 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/zookeeper_0, clientPort=58995, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T03:54:09,121 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58995 2024-11-14T03:54:09,147 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:54:09,152 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:54:09,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:54:09,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:54:10,048 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d with version=8 2024-11-14T03:54:10,048 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/hbase-staging 2024-11-14T03:54:10,180 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-14T03:54:10,561 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:54:10,577 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:54:10,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:54:10,584 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:54:10,584 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:54:10,585 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:54:10,792 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T03:54:10,879 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-14T03:54:10,894 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-14T03:54:10,899 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:54:10,937 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 6348 (auto-detected) 2024-11-14T03:54:10,938 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-14T03:54:10,965 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44031 2024-11-14T03:54:10,996 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44031 connecting to ZooKeeper ensemble=127.0.0.1:58995 2024-11-14T03:54:11,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:440310x0, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:54:11,096 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44031-0x100ef618fdb0000 connected 2024-11-14T03:54:11,322 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:54:11,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:54:11,349 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:54:11,355 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d, hbase.cluster.distributed=false 2024-11-14T03:54:11,403 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:54:11,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44031 
2024-11-14T03:54:11,415 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44031 2024-11-14T03:54:11,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44031 2024-11-14T03:54:11,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44031 2024-11-14T03:54:11,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44031 2024-11-14T03:54:11,593 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:54:11,596 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:54:11,596 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:54:11,597 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:54:11,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:54:11,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:54:11,602 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T03:54:11,605 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:54:11,606 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45463 2024-11-14T03:54:11,609 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45463 connecting to ZooKeeper ensemble=127.0.0.1:58995 2024-11-14T03:54:11,611 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:54:11,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:54:11,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454630x0, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:54:11,636 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:454630x0, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:54:11,641 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:45463-0x100ef618fdb0001 connected 2024-11-14T03:54:11,643 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T03:54:11,661 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T03:54:11,674 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T03:54:11,690 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:54:11,701 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45463 2024-11-14T03:54:11,706 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45463 2024-11-14T03:54:11,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45463 2024-11-14T03:54:11,725 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45463 2024-11-14T03:54:11,729 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45463 2024-11-14T03:54:11,763 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6a989be77d39:44031 2024-11-14T03:54:11,765 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6a989be77d39,44031,1731556450259 2024-11-14T03:54:11,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:54:11,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:54:11,782 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6a989be77d39,44031,1731556450259 2024-11-14T03:54:11,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:11,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T03:54:11,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:11,833 DEBUG 
[master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T03:54:11,853 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6a989be77d39,44031,1731556450259 from backup master directory 2024-11-14T03:54:11,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6a989be77d39,44031,1731556450259 2024-11-14T03:54:11,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:54:11,862 WARN [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T03:54:11,863 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6a989be77d39,44031,1731556450259 2024-11-14T03:54:11,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:54:11,866 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-14T03:54:11,867 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-14T03:54:11,975 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/hbase.id] with ID: 4a001b38-8af8-4600-bd25-adecf218c522 2024-11-14T03:54:11,975 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/.tmp/hbase.id 2024-11-14T03:54:12,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:54:12,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:54:12,024 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/.tmp/hbase.id]:[hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/hbase.id] 2024-11-14T03:54:12,108 INFO [master/6a989be77d39:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:54:12,114 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-14T03:54:12,143 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 25ms. 2024-11-14T03:54:12,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:12,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:12,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:54:12,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:54:12,216 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:54:12,219 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T03:54:12,230 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:54:12,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:54:12,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:54:12,313 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store 2024-11-14T03:54:12,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:54:12,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:54:12,358 INFO [master/6a989be77d39:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-14T03:54:12,362 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:54:12,364 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:54:12,364 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:54:12,365 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:54:12,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:54:12,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:54:12,367 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T03:54:12,369 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556452364Disabling compacts and flushes for region at 1731556452364Disabling writes for close at 1731556452367 (+3 ms)Writing region close event to WAL at 1731556452367Closed at 1731556452367 2024-11-14T03:54:12,373 WARN [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/.initializing 2024-11-14T03:54:12,373 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/WALs/6a989be77d39,44031,1731556450259 2024-11-14T03:54:12,411 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C44031%2C1731556450259, suffix=, logDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/WALs/6a989be77d39,44031,1731556450259, archiveDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/oldWALs, maxLogs=10 2024-11-14T03:54:12,423 INFO [master/6a989be77d39:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C44031%2C1731556450259.1731556452418 2024-11-14T03:54:12,486 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/WALs/6a989be77d39,44031,1731556450259/6a989be77d39%2C44031%2C1731556450259.1731556452418 2024-11-14T03:54:12,508 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38529:38529),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-14T03:54:12,509 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:54:12,510 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:54:12,518 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,519 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T03:54:12,623 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:12,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:54:12,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T03:54:12,633 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:12,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:54:12,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,639 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T03:54:12,640 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:12,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:54:12,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T03:54:12,646 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:12,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:54:12,649 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,653 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,655 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,663 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,664 DEBUG [master/6a989be77d39:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,669 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T03:54:12,673 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:54:12,679 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:54:12,681 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769553, jitterRate=-0.02146296203136444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T03:54:12,690 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731556452541Initializing all the Stores at 1731556452548 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556452549 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556452550 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556452550Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556452550Cleaning up temporary data from old regions at 1731556452664 (+114 ms)Region opened successfully at 1731556452689 (+25 ms) 2024-11-14T03:54:12,692 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T03:54:12,752 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f04549f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:54:12,802 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T03:54:12,819 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T03:54:12,819 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T03:54:12,824 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T03:54:12,826 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-14T03:54:12,832 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-14T03:54:12,833 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T03:54:12,871 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T03:54:12,884 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T03:54:12,887 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T03:54:12,891 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T03:54:12,893 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T03:54:12,895 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T03:54:12,898 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T03:54:12,904 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T03:54:12,906 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T03:54:12,908 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T03:54:12,913 
DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T03:54:12,938 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T03:54:12,945 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T03:54:12,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:54:12,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:54:12,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:12,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:12,956 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6a989be77d39,44031,1731556450259, sessionid=0x100ef618fdb0000, setting cluster-up flag (Was=false) 2024-11-14T03:54:12,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:12,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:12,982 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T03:54:12,987 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,44031,1731556450259 2024-11-14T03:54:12,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:12,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:13,002 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T03:54:13,005 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,44031,1731556450259 2024-11-14T03:54:13,013 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T03:54:13,041 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(746): ClusterId : 4a001b38-8af8-4600-bd25-adecf218c522 2024-11-14T03:54:13,044 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T03:54:13,050 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T03:54:13,050 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T03:54:13,055 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T03:54:13,056 DEBUG [RS:0;6a989be77d39:45463 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b889f05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:54:13,077 DEBUG [RS:0;6a989be77d39:45463 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6a989be77d39:45463 2024-11-14T03:54:13,082 INFO [RS:0;6a989be77d39:45463 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T03:54:13,082 INFO [RS:0;6a989be77d39:45463 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T03:54:13,082 DEBUG [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T03:54:13,086 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,44031,1731556450259 with port=45463, startcode=1731556451536 2024-11-14T03:54:13,100 DEBUG [RS:0;6a989be77d39:45463 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T03:54:13,115 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T03:54:13,126 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T03:54:13,136 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-14T03:54:13,150 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6a989be77d39,44031,1731556450259 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T03:54:13,162 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:54:13,162 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:54:13,162 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:54:13,162 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:54:13,163 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6a989be77d39:0, corePoolSize=10, maxPoolSize=10 2024-11-14T03:54:13,163 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,163 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:54:13,163 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,179 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731556483179 2024-11-14T03:54:13,181 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T03:54:13,182 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T03:54:13,183 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:54:13,184 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T03:54:13,188 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T03:54:13,188 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T03:54:13,189 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39991, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T03:54:13,189 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T03:54:13,189 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T03:54:13,190 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,192 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:13,192 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T03:54:13,194 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T03:54:13,196 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T03:54:13,196 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T03:54:13,201 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T03:54:13,202 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T03:54:13,199 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44031 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is 
not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-14T03:54:13,204 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556453203,5,FailOnTimeoutGroup] 2024-11-14T03:54:13,205 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556453204,5,FailOnTimeoutGroup] 2024-11-14T03:54:13,205 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,205 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T03:54:13,207 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,207 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,240 DEBUG [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T03:54:13,240 WARN [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-14T03:54:13,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:54:13,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:54:13,248 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T03:54:13,249 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d 2024-11-14T03:54:13,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:54:13,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:54:13,277 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:54:13,280 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:54:13,284 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:54:13,285 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:13,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:54:13,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:54:13,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:54:13,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:13,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:54:13,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:54:13,297 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:54:13,297 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:13,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:54:13,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:54:13,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:54:13,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:13,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:54:13,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:54:13,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740 2024-11-14T03:54:13,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740 2024-11-14T03:54:13,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:54:13,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:54:13,317 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T03:54:13,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:54:13,325 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:54:13,326 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699404, jitterRate=-0.11066269874572754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:54:13,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731556453277Initializing all the Stores at 1731556453280 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556453280Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556453280Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556453280Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556453280Cleaning up temporary data from old regions at 1731556453316 (+36 ms)Region opened successfully at 1731556453329 (+13 ms) 2024-11-14T03:54:13,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:54:13,329 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:54:13,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:54:13,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:54:13,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:54:13,332 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:54:13,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556453329Disabling compacts and flushes for region at 1731556453329Disabling writes for close at 1731556453330 (+1 ms)Writing 
region close event to WAL at 1731556453332 (+2 ms)Closed at 1731556453332 2024-11-14T03:54:13,337 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:54:13,337 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T03:54:13,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T03:54:13,349 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,44031,1731556450259 with port=45463, startcode=1731556451536 2024-11-14T03:54:13,352 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44031 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6a989be77d39,45463,1731556451536 2024-11-14T03:54:13,355 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44031 {}] master.ServerManager(517): Registering regionserver=6a989be77d39,45463,1731556451536 2024-11-14T03:54:13,359 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:54:13,363 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T03:54:13,369 DEBUG [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d 2024-11-14T03:54:13,370 DEBUG [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45187 2024-11-14T03:54:13,370 DEBUG [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T03:54:13,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:54:13,386 DEBUG [RS:0;6a989be77d39:45463 {}] zookeeper.ZKUtil(111): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6a989be77d39,45463,1731556451536 2024-11-14T03:54:13,387 WARN [RS:0;6a989be77d39:45463 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T03:54:13,387 INFO [RS:0;6a989be77d39:45463 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:54:13,387 DEBUG [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536 2024-11-14T03:54:13,398 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6a989be77d39,45463,1731556451536] 2024-11-14T03:54:13,437 INFO [RS:0;6a989be77d39:45463 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T03:54:13,455 INFO [RS:0;6a989be77d39:45463 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T03:54:13,462 INFO [RS:0;6a989be77d39:45463 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T03:54:13,462 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,464 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T03:54:13,471 INFO [RS:0;6a989be77d39:45463 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T03:54:13,473 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,474 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,474 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,474 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,474 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,474 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,477 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:54:13,478 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,478 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,478 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6a989be77d39:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T03:54:13,478 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,478 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,479 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:54:13,479 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:54:13,479 DEBUG [RS:0;6a989be77d39:45463 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:54:13,482 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,482 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,482 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,483 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,483 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,483 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,45463,1731556451536-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:54:13,509 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T03:54:13,512 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,45463,1731556451536-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,512 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:13,512 INFO [RS:0;6a989be77d39:45463 {}] regionserver.Replication(171): 6a989be77d39,45463,1731556451536 started 2024-11-14T03:54:13,514 WARN [6a989be77d39:44031 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T03:54:13,536 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T03:54:13,537 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(1482): Serving as 6a989be77d39,45463,1731556451536, RpcServer on 6a989be77d39/172.17.0.2:45463, sessionid=0x100ef618fdb0001 2024-11-14T03:54:13,538 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T03:54:13,538 DEBUG [RS:0;6a989be77d39:45463 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6a989be77d39,45463,1731556451536 2024-11-14T03:54:13,539 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,45463,1731556451536' 2024-11-14T03:54:13,539 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T03:54:13,540 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T03:54:13,541 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T03:54:13,542 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T03:54:13,542 DEBUG [RS:0;6a989be77d39:45463 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6a989be77d39,45463,1731556451536 2024-11-14T03:54:13,542 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,45463,1731556451536' 2024-11-14T03:54:13,542 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T03:54:13,543 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T03:54:13,544 DEBUG [RS:0;6a989be77d39:45463 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T03:54:13,544 INFO [RS:0;6a989be77d39:45463 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T03:54:13,544 INFO [RS:0;6a989be77d39:45463 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-14T03:54:13,653 INFO [RS:0;6a989be77d39:45463 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C45463%2C1731556451536, suffix=, logDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536, archiveDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs, maxLogs=32 2024-11-14T03:54:13,656 INFO [RS:0;6a989be77d39:45463 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.1731556453656 2024-11-14T03:54:13,668 INFO [RS:0;6a989be77d39:45463 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556453656 2024-11-14T03:54:13,670 DEBUG [RS:0;6a989be77d39:45463 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45605:45605),(127.0.0.1/127.0.0.1:38529:38529)] 2024-11-14T03:54:13,766 DEBUG [6a989be77d39:44031 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T03:54:13,779 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6a989be77d39,45463,1731556451536 2024-11-14T03:54:13,786 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,45463,1731556451536, state=OPENING 2024-11-14T03:54:13,791 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T03:54:13,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:13,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:54:13,794 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:54:13,794 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:54:13,795 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:54:13,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,45463,1731556451536}] 2024-11-14T03:54:13,975 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T03:54:13,979 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38211, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T03:54:13,991 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T03:54:13,992 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:54:13,997 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C45463%2C1731556451536.meta, suffix=.meta, logDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536, archiveDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs, maxLogs=32 2024-11-14T03:54:14,000 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.meta.1731556453999.meta 2024-11-14T03:54:14,023 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.meta.1731556453999.meta 2024-11-14T03:54:14,027 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45605:45605),(127.0.0.1/127.0.0.1:38529:38529)] 2024-11-14T03:54:14,030 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:54:14,033 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T03:54:14,037 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T03:54:14,044 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T03:54:14,050 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T03:54:14,051 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:54:14,052 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T03:54:14,052 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T03:54:14,057 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:54:14,060 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:54:14,060 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:14,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:54:14,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:54:14,063 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:54:14,063 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:14,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:54:14,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:54:14,066 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:54:14,066 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:14,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:54:14,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:54:14,069 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:54:14,069 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:14,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T03:54:14,070 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:54:14,072 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740 2024-11-14T03:54:14,074 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740 2024-11-14T03:54:14,076 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:54:14,076 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:54:14,077 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:54:14,080 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:54:14,081 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826712, jitterRate=0.05121876299381256}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:54:14,081 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T03:54:14,083 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731556454052Writing region info on filesystem at 1731556454053 (+1 ms)Initializing all the Stores at 1731556454056 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556454056Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556454057 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556454057Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556454057Cleaning up temporary data from old regions at 1731556454077 (+20 ms)Running coprocessor post-open hooks at 1731556454082 (+5 ms)Region opened successfully at 1731556454083 (+1 ms) 2024-11-14T03:54:14,091 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731556453964 2024-11-14T03:54:14,104 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T03:54:14,105 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T03:54:14,107 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,45463,1731556451536 2024-11-14T03:54:14,110 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,45463,1731556451536, state=OPEN 2024-11-14T03:54:14,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:54:14,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:54:14,117 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:54:14,117 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:54:14,117 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6a989be77d39,45463,1731556451536 2024-11-14T03:54:14,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T03:54:14,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,45463,1731556451536 in 321 msec 2024-11-14T03:54:14,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T03:54:14,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 779 msec 2024-11-14T03:54:14,133 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:54:14,134 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T03:54:14,155 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:54:14,157 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,45463,1731556451536, seqNum=-1] 2024-11-14T03:54:14,182 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:54:14,185 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33995, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:54:14,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1640 sec 2024-11-14T03:54:14,220 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731556454220, completionTime=-1 2024-11-14T03:54:14,227 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T03:54:14,227 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T03:54:14,268 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T03:54:14,269 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731556514269 2024-11-14T03:54:14,269 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731556574269 2024-11-14T03:54:14,269 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 41 msec 2024-11-14T03:54:14,272 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44031,1731556450259-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:14,274 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44031,1731556450259-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:14,274 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44031,1731556450259-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:14,276 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6a989be77d39:44031, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T03:54:14,277 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:14,282 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T03:54:14,287 DEBUG [master/6a989be77d39:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T03:54:14,316 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.452sec 2024-11-14T03:54:14,317 INFO [master/6a989be77d39:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T03:54:14,319 INFO [master/6a989be77d39:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T03:54:14,320 INFO [master/6a989be77d39:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T03:54:14,320 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T03:54:14,320 INFO [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T03:54:14,321 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44031,1731556450259-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:54:14,322 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44031,1731556450259-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T03:54:14,334 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T03:54:14,336 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T03:54:14,336 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44031,1731556450259-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T03:54:14,355 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a067e28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:54:14,358 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-14T03:54:14,358 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-14T03:54:14,363 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6a989be77d39,44031,-1 for getting cluster id 2024-11-14T03:54:14,367 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T03:54:14,386 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4a001b38-8af8-4600-bd25-adecf218c522' 2024-11-14T03:54:14,392 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T03:54:14,392 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4a001b38-8af8-4600-bd25-adecf218c522" 2024-11-14T03:54:14,397 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f624545, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:54:14,397 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6a989be77d39,44031,-1] 2024-11-14T03:54:14,401 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T03:54:14,411 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:54:14,417 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37282, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T03:54:14,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d55b43a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:54:14,435 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:54:14,455 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,45463,1731556451536, seqNum=-1] 2024-11-14T03:54:14,456 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:54:14,462 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54628, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:54:14,493 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=6a989be77d39,44031,1731556450259 2024-11-14T03:54:14,493 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:54:14,503 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T03:54:14,507 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T03:54:14,515 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(321): The fetched master address is 6a989be77d39,44031,1731556450259 2024-11-14T03:54:14,555 DEBUG [Time-limited test {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@50b12286 2024-11-14T03:54:14,557 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T03:54:14,581 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T03:54:14,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44031 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T03:54:14,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44031 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-14T03:54:14,589 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44031 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:54:14,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44031 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-14T03:54:14,621 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T03:54:14,624 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44031 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-14T03:54:14,625 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:14,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44031 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T03:54:14,632 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T03:54:14,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741835_1011 (size=389) 2024-11-14T03:54:14,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741835_1011 (size=389) 2024-11-14T03:54:14,705 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d9cfb2e227709f3283626f7a284a8953, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d 2024-11-14T03:54:14,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741836_1012 (size=72) 2024-11-14T03:54:14,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741836_1012 (size=72) 2024-11-14T03:54:15,160 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:54:15,161 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing d9cfb2e227709f3283626f7a284a8953, disabling compactions & flushes 2024-11-14T03:54:15,161 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:54:15,161 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:54:15,161 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. after waiting 0 ms 2024-11-14T03:54:15,161 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:54:15,161 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:54:15,161 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for d9cfb2e227709f3283626f7a284a8953: Waiting for close lock at 1731556455161Disabling compacts and flushes for region at 1731556455161Disabling writes for close at 1731556455161Writing region close event to WAL at 1731556455161Closed at 1731556455161 2024-11-14T03:54:15,164 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T03:54:15,169 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731556455164"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731556455164"}]},"ts":"1731556455164"} 2024-11-14T03:54:15,176 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T03:54:15,178 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T03:54:15,182 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556455178"}]},"ts":"1731556455178"} 2024-11-14T03:54:15,193 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-14T03:54:15,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9cfb2e227709f3283626f7a284a8953, ASSIGN}] 2024-11-14T03:54:15,198 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9cfb2e227709f3283626f7a284a8953, ASSIGN 2024-11-14T03:54:15,200 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9cfb2e227709f3283626f7a284a8953, ASSIGN; state=OFFLINE, location=6a989be77d39,45463,1731556451536; forceNewPlan=false, retain=false 2024-11-14T03:54:15,352 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d9cfb2e227709f3283626f7a284a8953, regionState=OPENING, regionLocation=6a989be77d39,45463,1731556451536 2024-11-14T03:54:15,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9cfb2e227709f3283626f7a284a8953, ASSIGN because future has completed 2024-11-14T03:54:15,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d9cfb2e227709f3283626f7a284a8953, server=6a989be77d39,45463,1731556451536}] 2024-11-14T03:54:15,521 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 
2024-11-14T03:54:15,522 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d9cfb2e227709f3283626f7a284a8953, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:54:15,522 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,522 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:54:15,523 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,523 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,526 INFO [StoreOpener-d9cfb2e227709f3283626f7a284a8953-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,529 INFO [StoreOpener-d9cfb2e227709f3283626f7a284a8953-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d9cfb2e227709f3283626f7a284a8953 columnFamilyName info 2024-11-14T03:54:15,529 DEBUG [StoreOpener-d9cfb2e227709f3283626f7a284a8953-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:54:15,530 INFO [StoreOpener-d9cfb2e227709f3283626f7a284a8953-1 {}] regionserver.HStore(327): Store=d9cfb2e227709f3283626f7a284a8953/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:54:15,530 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,533 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,534 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,535 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,535 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,539 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,543 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:54:15,544 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d9cfb2e227709f3283626f7a284a8953; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798194, jitterRate=0.014957264065742493}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T03:54:15,544 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:15,545 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d9cfb2e227709f3283626f7a284a8953: Running coprocessor pre-open hook at 1731556455523Writing region info on filesystem at 1731556455523Initializing all the Stores at 1731556455525 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556455525Cleaning up temporary data from old regions at 1731556455536 (+11 ms)Running coprocessor post-open hooks at 1731556455544 (+8 ms)Region opened successfully at 1731556455545 (+1 ms) 2024-11-14T03:54:15,547 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953., pid=6, masterSystemTime=1731556455513 2024-11-14T03:54:15,552 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:54:15,552 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:54:15,553 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d9cfb2e227709f3283626f7a284a8953, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,45463,1731556451536 2024-11-14T03:54:15,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d9cfb2e227709f3283626f7a284a8953, server=6a989be77d39,45463,1731556451536 because future has completed 2024-11-14T03:54:15,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T03:54:15,570 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d9cfb2e227709f3283626f7a284a8953, server=6a989be77d39,45463,1731556451536 in 205 msec 2024-11-14T03:54:15,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T03:54:15,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=d9cfb2e227709f3283626f7a284a8953, ASSIGN in 375 msec 2024-11-14T03:54:15,577 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T03:54:15,577 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556455577"}]},"ts":"1731556455577"} 2024-11-14T03:54:15,587 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-14T03:54:15,589 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T03:54:15,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 996 msec 2024-11-14T03:54:19,569 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-14T03:54:19,626 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T03:54:19,629 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-14T03:54:20,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T03:54:20,875 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T03:54:20,878 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T03:54:20,878 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T03:54:20,879 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:54:20,879 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T03:54:20,880 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T03:54:20,880 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T03:54:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44031 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T03:54:24,672 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-14T03:54:24,675 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-14T03:54:24,682 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-14T03:54:24,682 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 
2024-11-14T03:54:24,683 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.1731556464683 2024-11-14T03:54:24,692 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:24,692 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:24,692 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:24,692 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:24,693 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:24,693 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556453656 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556464683 2024-11-14T03:54:24,695 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38529:38529),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-14T03:54:24,695 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556453656 is not closed yet, will try archiving it next time 2024-11-14T03:54:24,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741833_1009 (size=451) 2024-11-14T03:54:24,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741833_1009 (size=451) 2024-11-14T03:54:24,699 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556453656 to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs/6a989be77d39%2C45463%2C1731556451536.1731556453656 2024-11-14T03:54:24,707 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953., hostname=6a989be77d39,45463,1731556451536, seqNum=2] 2024-11-14T03:54:36,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45463 {}] regionserver.HRegion(8855): Flush requested on d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:36,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9cfb2e227709f3283626f7a284a8953 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:54:36,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/6557fee24929403d8cca894676da7e3f is 1080, key is row0001/info:/1731556464709/Put/seqid=0 2024-11-14T03:54:36,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741838_1014 (size=12509) 2024-11-14T03:54:36,854 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741838_1014 (size=12509) 2024-11-14T03:54:36,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/6557fee24929403d8cca894676da7e3f 2024-11-14T03:54:36,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/6557fee24929403d8cca894676da7e3f as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/6557fee24929403d8cca894676da7e3f 2024-11-14T03:54:36,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/6557fee24929403d8cca894676da7e3f, entries=7, sequenceid=11, filesize=12.2 K 2024-11-14T03:54:36,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for d9cfb2e227709f3283626f7a284a8953 in 204ms, sequenceid=11, compaction requested=false 2024-11-14T03:54:36,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9cfb2e227709f3283626f7a284a8953: 2024-11-14T03:54:38,953 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
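The flush above follows the temp-then-commit pattern that the "Committing .tmp/info/... as info/..." line records: the new HFile is written under the region's .tmp directory and only renamed into the info family directory once complete, so readers never see a partial file. A self-contained sketch of that pattern using the plain Hadoop FileSystem API; this is not HBase's actual flush code (which goes through HFile writers and the store engine), and the paths are shortened placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);   // the default FS; in this log it is hdfs://localhost:45187
        Path tmp = new Path("/region/.tmp/info/6557fee24929403d8cca894676da7e3f");
        Path dst = new Path("/region/info/6557fee24929403d8cca894676da7e3f");
        try (FSDataOutputStream out = fs.create(tmp)) {  // write the whole file under .tmp first
          out.writeBytes("flushed cells would be written here");
        }
        if (!fs.rename(tmp, dst)) {                      // the rename is the commit step
          throw new java.io.IOException("commit failed for " + dst);
        }
      }
    }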
2024-11-14T03:54:44,755 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.1731556484755 2024-11-14T03:54:44,965 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:44,965 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:44,965 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:44,965 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:44,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:44,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:44,966 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556464683 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556484755 2024-11-14T03:54:44,967 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38529:38529),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-14T03:54:44,968 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556464683 is not closed yet, will try archiving it next time 2024-11-14T03:54:44,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741837_1013 (size=12399) 2024-11-14T03:54:44,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741837_1013 (size=12399) 2024-11-14T03:54:45,172 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:47,377 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:49,582 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:51,786 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:51,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45463 {}] regionserver.HRegion(8855): Flush requested on d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:54:51,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9cfb2e227709f3283626f7a284a8953 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:54:51,989 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:51,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/1abaca85d5b2464b8e2e09e112c25e27 is 1080, key is row0008/info:/1731556478744/Put/seqid=0 2024-11-14T03:54:52,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741840_1016 (size=12509) 2024-11-14T03:54:52,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741840_1016 (size=12509) 2024-11-14T03:54:52,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/1abaca85d5b2464b8e2e09e112c25e27 2024-11-14T03:54:52,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/1abaca85d5b2464b8e2e09e112c25e27 as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/1abaca85d5b2464b8e2e09e112c25e27 2024-11-14T03:54:52,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/1abaca85d5b2464b8e2e09e112c25e27, entries=7, sequenceid=21, filesize=12.2 K 2024-11-14T03:54:52,227 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:52,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for d9cfb2e227709f3283626f7a284a8953 in 
441ms, sequenceid=21, compaction requested=false 2024-11-14T03:54:52,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9cfb2e227709f3283626f7a284a8953: 2024-11-14T03:54:52,227 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-14T03:54:52,227 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:54:52,228 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/6557fee24929403d8cca894676da7e3f because midkey is the same as first or last row 2024-11-14T03:54:53,992 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:54,355 INFO [master/6a989be77d39:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T03:54:54,356 INFO [master/6a989be77d39:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T03:54:56,196 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:56,198 WARN [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:56,199 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C45463%2C1731556451536:(num 1731556484755) roll requested 2024-11-14T03:54:56,199 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.1731556496199 2024-11-14T03:54:56,407 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:54:56,408 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:56,408 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:56,408 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:56,408 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:54:56,408 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
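The WARN at 03:54:56 shows the count-based roll trigger: eight syncs above the slow-sync latency bound against a threshold of five, so a roll of the current WAL is requested and carried out in the records that follow. A toy version of that accounting, driven only by parsing the "Slow sync cost" messages themselves; the 100 ms slow-sync bound is an assumed default, while the count and 5000 ms figures are read off the threshold messages in this log:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class SlowSyncAccounting {
      // Two triggers appear in this log: one very slow sync ("time=5006 ms, threshold=5000 ms")
      // and too many moderately slow syncs ("count=8, threshold=5").
      static final Pattern SLOW_SYNC = Pattern.compile("Slow sync cost: (\\d+) ms");
      static final long SLOW_SYNC_MS = 100;        // assumed default; the log only shows that 201 ms counts as slow
      static final long ROLL_ON_SYNC_MS = 5000;    // from "threshold=5000 ms"
      static final int SLOW_SYNC_ROLL_COUNT = 5;   // from "threshold=5"

      public static void main(String[] args) {
        String[] lines = {
          "wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [...]",
          "wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [...]"
        };
        int slowCount = 0;
        for (String line : lines) {
          Matcher m = SLOW_SYNC.matcher(line);
          if (!m.find()) continue;
          long costMs = Long.parseLong(m.group(1));
          if (costMs >= ROLL_ON_SYNC_MS) {
            System.out.println("request roll: a single sync took " + costMs + " ms");
          } else if (costMs >= SLOW_SYNC_MS && ++slowCount > SLOW_SYNC_ROLL_COUNT) {
            System.out.println("request roll: " + slowCount + " slow syncs exceed " + SLOW_SYNC_ROLL_COUNT);
          }
        }
      }
    }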
2024-11-14T03:54:56,409 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556484755 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556496199 2024-11-14T03:54:56,410 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38529:38529),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-14T03:54:56,410 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556484755 is not closed yet, will try archiving it next time 2024-11-14T03:54:56,410 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556464683 to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs/6a989be77d39%2C45463%2C1731556451536.1731556464683 2024-11-14T03:54:56,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741839_1015 (size=7739) 2024-11-14T03:54:56,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741839_1015 (size=7739) 2024-11-14T03:54:58,400 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:00,523 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d9cfb2e227709f3283626f7a284a8953, had cached 0 bytes from a total of 25018 2024-11-14T03:55:00,604 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:02,809 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:05,013 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], 
DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:07,015 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T03:55:07,015 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.1731556507015 2024-11-14T03:55:08,953 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T03:55:12,024 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:12,026 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:12,026 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C45463%2C1731556451536:(num 1731556507015) roll requested 2024-11-14T03:55:12,026 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:12,027 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:12,027 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:12,027 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:12,027 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:12,027 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556496199 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556507015 2024-11-14T03:55:12,029 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45605:45605),(127.0.0.1/127.0.0.1:38529:38529)] 2024-11-14T03:55:12,029 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556496199 is not closed yet, will try archiving it next time 2024-11-14T03:55:12,030 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.1731556512029 2024-11-14T03:55:12,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741841_1017 (size=4753) 2024-11-14T03:55:12,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741841_1017 (size=4753) 2024-11-14T03:55:17,033 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK], DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK]] 2024-11-14T03:55:17,033 WARN [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK], DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK]] 2024-11-14T03:55:17,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45463 {}] regionserver.HRegion(8855): Flush requested on d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:55:17,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9cfb2e227709f3283626f7a284a8953 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:55:17,048 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK], DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK]] 2024-11-14T03:55:17,048 WARN [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK], DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK]] 2024-11-14T03:55:19,035 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T03:55:22,036 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK], DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK]] 2024-11-14T03:55:22,036 WARN [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK], DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK]] 2024-11-14T03:55:22,037 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:22,037 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:22,037 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:22,037 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:22,037 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:22,038 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556507015 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556512029 2024-11-14T03:55:22,039 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38529:38529),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-14T03:55:22,039 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556507015 is not closed yet, will try archiving it next time 2024-11-14T03:55:22,039 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C45463%2C1731556451536:(num 1731556512029) roll requested 2024-11-14T03:55:22,040 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.1731556522040 2024-11-14T03:55:22,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741842_1018 (size=1569) 2024-11-14T03:55:22,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/323df546ef1644dabfbd0b87b6f019cb is 1080, key is row0015/info:/1731556493790/Put/seqid=0 2024-11-14T03:55:22,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741842_1018 (size=1569) 2024-11-14T03:55:22,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741844_1020 (size=12509) 2024-11-14T03:55:22,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741844_1020 (size=12509) 2024-11-14T03:55:22,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/323df546ef1644dabfbd0b87b6f019cb 2024-11-14T03:55:22,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/323df546ef1644dabfbd0b87b6f019cb as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/323df546ef1644dabfbd0b87b6f019cb 2024-11-14T03:55:22,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/323df546ef1644dabfbd0b87b6f019cb, entries=7, sequenceid=31, filesize=12.2 K 2024-11-14T03:55:27,051 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:27,051 WARN [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:27,085 INFO [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:27,085 WARN [FSHLog-0-hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d-prefix:6a989be77d39,45463,1731556451536 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34925,DS-7cbac17e-95bb-407a-b30c-6af745554908,DISK], DatanodeInfoWithStorage[127.0.0.1:34101,DS-ff0b871a-36fe-41d2-a9d9-c1f9a7e79a3e,DISK]] 2024-11-14T03:55:27,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for d9cfb2e227709f3283626f7a284a8953 in 10051ms, sequenceid=31, compaction requested=true 2024-11-14T03:55:27,086 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9cfb2e227709f3283626f7a284a8953: 2024-11-14T03:55:27,086 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,086 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,086 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-14T03:55:27,086 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:55:27,086 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,086 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/6557fee24929403d8cca894676da7e3f because midkey is the same as first or last row 2024-11-14T03:55:27,086 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,087 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556512029 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556522040 2024-11-14T03:55:27,088 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:45605:45605),(127.0.0.1/127.0.0.1:38529:38529)] 2024-11-14T03:55:27,088 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556512029 is not closed yet, will try archiving it next time 2024-11-14T03:55:27,088 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556484755 to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs/6a989be77d39%2C45463%2C1731556451536.1731556484755 2024-11-14T03:55:27,088 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C45463%2C1731556451536:(num 1731556527088) roll requested 2024-11-14T03:55:27,088 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.1731556527088 2024-11-14T03:55:27,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9cfb2e227709f3283626f7a284a8953:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:55:27,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741843_1019 (size=438) 2024-11-14T03:55:27,090 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556496199 to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs/6a989be77d39%2C45463%2C1731556451536.1731556496199 2024-11-14T03:55:27,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741843_1019 (size=438) 2024-11-14T03:55:27,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:55:27,093 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556507015 to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs/6a989be77d39%2C45463%2C1731556451536.1731556507015 2024-11-14T03:55:27,093 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:55:27,095 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556512029 to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs/6a989be77d39%2C45463%2C1731556451536.1731556512029 2024-11-14T03:55:27,097 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-14T03:55:27,099 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.HStore(1541): d9cfb2e227709f3283626f7a284a8953/info is initiating minor compaction (all files) 2024-11-14T03:55:27,099 INFO [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d9cfb2e227709f3283626f7a284a8953/info in TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:55:27,100 INFO [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/6557fee24929403d8cca894676da7e3f, hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/1abaca85d5b2464b8e2e09e112c25e27, hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/323df546ef1644dabfbd0b87b6f019cb] into tmpdir=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp, totalSize=36.6 K 2024-11-14T03:55:27,101 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6557fee24929403d8cca894676da7e3f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731556464709 2024-11-14T03:55:27,101 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1abaca85d5b2464b8e2e09e112c25e27, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731556478744 2024-11-14T03:55:27,102 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] compactions.Compactor(225): Compacting 323df546ef1644dabfbd0b87b6f019cb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731556493790 2024-11-14T03:55:27,104 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,104 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,104 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,104 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,104 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,104 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556522040 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556527088 2024-11-14T03:55:27,105 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38529:38529),(127.0.0.1/127.0.0.1:45605:45605)] 2024-11-14T03:55:27,106 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556522040 is not closed yet, will try archiving it next time 2024-11-14T03:55:27,106 INFO 
[regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45463%2C1731556451536.1731556527106 2024-11-14T03:55:27,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741845_1021 (size=93) 2024-11-14T03:55:27,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741845_1021 (size=93) 2024-11-14T03:55:27,108 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556522040 to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs/6a989be77d39%2C45463%2C1731556451536.1731556522040 2024-11-14T03:55:27,122 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,122 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,122 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,122 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,122 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:27,123 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556527088 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/WALs/6a989be77d39,45463,1731556451536/6a989be77d39%2C45463%2C1731556451536.1731556527106 2024-11-14T03:55:27,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741846_1022 (size=1258) 2024-11-14T03:55:27,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741846_1022 (size=1258) 2024-11-14T03:55:27,129 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45605:45605),(127.0.0.1/127.0.0.1:38529:38529)] 2024-11-14T03:55:27,140 INFO [RS:0;6a989be77d39:45463-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9cfb2e227709f3283626f7a284a8953#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:55:27,142 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/9c8cceb2a16545758319047e92760db2 is 1080, key is row0001/info:/1731556464709/Put/seqid=0 2024-11-14T03:55:27,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741848_1024 (size=27710) 2024-11-14T03:55:27,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741848_1024 (size=27710) 2024-11-14T03:55:27,176 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/9c8cceb2a16545758319047e92760db2 as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/9c8cceb2a16545758319047e92760db2 2024-11-14T03:55:27,196 INFO [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d9cfb2e227709f3283626f7a284a8953/info of d9cfb2e227709f3283626f7a284a8953 into 9c8cceb2a16545758319047e92760db2(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T03:55:27,196 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d9cfb2e227709f3283626f7a284a8953: 2024-11-14T03:55:27,199 INFO [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953., storeName=d9cfb2e227709f3283626f7a284a8953/info, priority=13, startTime=1731556527088; duration=0sec 2024-11-14T03:55:27,199 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T03:55:27,199 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:55:27,199 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/9c8cceb2a16545758319047e92760db2 because midkey is the same as first or last row 2024-11-14T03:55:27,200 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T03:55:27,200 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:55:27,200 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/9c8cceb2a16545758319047e92760db2 because midkey is the same as first or last row 2024-11-14T03:55:27,200 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T03:55:27,200 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:55:27,200 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/9c8cceb2a16545758319047e92760db2 because midkey is the same as first or last row 2024-11-14T03:55:27,200 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:55:27,201 DEBUG [RS:0;6a989be77d39:45463-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9cfb2e227709f3283626f7a284a8953:info 2024-11-14T03:55:38,953 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T03:55:39,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45463 {}] regionserver.HRegion(8855): Flush requested on d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:55:39,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9cfb2e227709f3283626f7a284a8953 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:55:39,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/131db6998bdb4dbb918be00bbd8aba57 is 1080, key is row0022/info:/1731556527108/Put/seqid=0 2024-11-14T03:55:39,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741849_1025 (size=12509) 2024-11-14T03:55:39,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741849_1025 (size=12509) 2024-11-14T03:55:39,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/131db6998bdb4dbb918be00bbd8aba57 2024-11-14T03:55:39,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/131db6998bdb4dbb918be00bbd8aba57 as 
hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/131db6998bdb4dbb918be00bbd8aba57 2024-11-14T03:55:39,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/131db6998bdb4dbb918be00bbd8aba57, entries=7, sequenceid=42, filesize=12.2 K 2024-11-14T03:55:39,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for d9cfb2e227709f3283626f7a284a8953 in 453ms, sequenceid=42, compaction requested=false 2024-11-14T03:55:39,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9cfb2e227709f3283626f7a284a8953: 2024-11-14T03:55:39,594 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-14T03:55:39,594 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:55:39,594 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/9c8cceb2a16545758319047e92760db2 because midkey is the same as first or last row 2024-11-14T03:55:45,523 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d9cfb2e227709f3283626f7a284a8953, had cached 0 bytes from a total of 40219 2024-11-14T03:55:47,156 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T03:55:47,156 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
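Everything from "Shutting down minicluster" onward is the standard teardown of the single-process test cluster: the shared async connection is closed, the master is asked to shut down, and the lone regionserver closes its regions. The stack traces below show the entry point, AbstractTestLogRolling.tearDown() calling HBaseTestingUtil.shutdownMiniCluster(). A skeleton of that setup/teardown pairing, assuming the HBase testing utility and JUnit 4 on the classpath; the class and test names are illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterSketchTest {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        testUtil.startMiniCluster();       // one master plus one regionserver, as in this log
      }

      @After
      public void tearDown() throws Exception {
        testUtil.shutdownMiniCluster();    // produces the "Shutting down minicluster" sequence above
      }

      @Test
      public void testSomething() throws Exception {
        // the test body would exercise the cluster here
      }
    }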
2024-11-14T03:55:47,156 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:55:47,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:47,163 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:47,163 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-14T03:55:47,163 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T03:55:47,163 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=272026373, stopped=false 2024-11-14T03:55:47,164 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6a989be77d39,44031,1731556450259 2024-11-14T03:55:47,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:55:47,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:55:47,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:47,166 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:55:47,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:47,166 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T03:55:47,166 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:55:47,166 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:47,167 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:55:47,167 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6a989be77d39,45463,1731556451536' ***** 2024-11-14T03:55:47,167 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T03:55:47,167 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:55:47,168 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T03:55:47,168 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T03:55:47,168 INFO [RS:0;6a989be77d39:45463 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T03:55:47,168 INFO [RS:0;6a989be77d39:45463 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T03:55:47,168 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(3091): Received CLOSE for d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:55:47,169 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(959): stopping server 6a989be77d39,45463,1731556451536 2024-11-14T03:55:47,169 INFO [RS:0;6a989be77d39:45463 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:55:47,169 INFO [RS:0;6a989be77d39:45463 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6a989be77d39:45463. 
2024-11-14T03:55:47,169 DEBUG [RS:0;6a989be77d39:45463 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:55:47,169 DEBUG [RS:0;6a989be77d39:45463 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:47,169 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d9cfb2e227709f3283626f7a284a8953, disabling compactions & flushes 2024-11-14T03:55:47,170 INFO [RS:0;6a989be77d39:45463 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T03:55:47,170 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:55:47,170 INFO [RS:0;6a989be77d39:45463 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T03:55:47,170 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:55:47,170 INFO [RS:0;6a989be77d39:45463 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T03:55:47,170 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. after waiting 0 ms 2024-11-14T03:55:47,170 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 
2024-11-14T03:55:47,170 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T03:55:47,170 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d9cfb2e227709f3283626f7a284a8953 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-14T03:55:47,170 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T03:55:47,170 DEBUG [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, d9cfb2e227709f3283626f7a284a8953=TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.} 2024-11-14T03:55:47,170 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:55:47,171 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:55:47,171 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:55:47,171 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:55:47,171 DEBUG [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d9cfb2e227709f3283626f7a284a8953 2024-11-14T03:55:47,171 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:55:47,171 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-14T03:55:47,177 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/ab0bddf7cfd6405dbddc9b4ab900a9c6 is 1080, key is row0029/info:/1731556541143/Put/seqid=0 2024-11-14T03:55:47,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741850_1026 (size=8193) 2024-11-14T03:55:47,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741850_1026 (size=8193) 2024-11-14T03:55:47,187 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/ab0bddf7cfd6405dbddc9b4ab900a9c6 2024-11-14T03:55:47,197 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/.tmp/info/ab0bddf7cfd6405dbddc9b4ab900a9c6 as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/ab0bddf7cfd6405dbddc9b4ab900a9c6 2024-11-14T03:55:47,199 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/.tmp/info/e75ffc5d2c3e46768d2941fbad994684 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953./info:regioninfo/1731556455553/Put/seqid=0 2024-11-14T03:55:47,206 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/ab0bddf7cfd6405dbddc9b4ab900a9c6, entries=3, sequenceid=48, filesize=8.0 K 2024-11-14T03:55:47,207 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for d9cfb2e227709f3283626f7a284a8953 in 37ms, sequenceid=48, compaction requested=true 2024-11-14T03:55:47,208 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/6557fee24929403d8cca894676da7e3f, hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/1abaca85d5b2464b8e2e09e112c25e27, hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/323df546ef1644dabfbd0b87b6f019cb] to archive 2024-11-14T03:55:47,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741851_1027 (size=7016) 2024-11-14T03:55:47,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741851_1027 (size=7016) 2024-11-14T03:55:47,210 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/.tmp/info/e75ffc5d2c3e46768d2941fbad994684 2024-11-14T03:55:47,211 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-14T03:55:47,215 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/6557fee24929403d8cca894676da7e3f to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/6557fee24929403d8cca894676da7e3f 2024-11-14T03:55:47,219 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/1abaca85d5b2464b8e2e09e112c25e27 to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/1abaca85d5b2464b8e2e09e112c25e27 2024-11-14T03:55:47,221 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/323df546ef1644dabfbd0b87b6f019cb to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/info/323df546ef1644dabfbd0b87b6f019cb 2024-11-14T03:55:47,237 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6a989be77d39:44031 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-14T03:55:47,239 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6557fee24929403d8cca894676da7e3f=12509, 1abaca85d5b2464b8e2e09e112c25e27=12509, 323df546ef1644dabfbd0b87b6f019cb=12509] 2024-11-14T03:55:47,240 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/.tmp/ns/c8e772610d2546299e1056363c226a6d is 43, key is default/ns:d/1731556454191/Put/seqid=0 2024-11-14T03:55:47,246 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/default/TestLogRolling-testSlowSyncLogRolling/d9cfb2e227709f3283626f7a284a8953/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-14T03:55:47,250 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 2024-11-14T03:55:47,250 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d9cfb2e227709f3283626f7a284a8953: Waiting for close lock at 1731556547169Running coprocessor pre-close hooks at 1731556547169Disabling compacts and flushes for region at 1731556547169Disabling writes for close at 1731556547170 (+1 ms)Obtaining lock to block concurrent updates at 1731556547170Preparing flush snapshotting stores in d9cfb2e227709f3283626f7a284a8953 at 1731556547170Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731556547171 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. at 1731556547172 (+1 ms)Flushing d9cfb2e227709f3283626f7a284a8953/info: creating writer at 1731556547172Flushing d9cfb2e227709f3283626f7a284a8953/info: appending metadata at 1731556547176 (+4 ms)Flushing d9cfb2e227709f3283626f7a284a8953/info: closing flushed file at 1731556547176Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25ca31a3: reopening flushed file at 1731556547196 (+20 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for d9cfb2e227709f3283626f7a284a8953 in 37ms, sequenceid=48, compaction requested=true at 1731556547207 (+11 ms)Writing region close event to WAL at 1731556547240 (+33 ms)Running coprocessor post-close hooks at 1731556547247 (+7 ms)Closed at 1731556547250 (+3 ms) 2024-11-14T03:55:47,251 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731556454583.d9cfb2e227709f3283626f7a284a8953. 
2024-11-14T03:55:47,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741852_1028 (size=5153) 2024-11-14T03:55:47,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741852_1028 (size=5153) 2024-11-14T03:55:47,253 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/.tmp/ns/c8e772610d2546299e1056363c226a6d 2024-11-14T03:55:47,282 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/.tmp/table/7273fdd4f94b443b850416901951bbbf is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731556455577/Put/seqid=0 2024-11-14T03:55:47,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741853_1029 (size=5396) 2024-11-14T03:55:47,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741853_1029 (size=5396) 2024-11-14T03:55:47,289 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/.tmp/table/7273fdd4f94b443b850416901951bbbf 2024-11-14T03:55:47,297 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/.tmp/info/e75ffc5d2c3e46768d2941fbad994684 as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/info/e75ffc5d2c3e46768d2941fbad994684 2024-11-14T03:55:47,305 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/info/e75ffc5d2c3e46768d2941fbad994684, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T03:55:47,307 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/.tmp/ns/c8e772610d2546299e1056363c226a6d as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/ns/c8e772610d2546299e1056363c226a6d 2024-11-14T03:55:47,314 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/ns/c8e772610d2546299e1056363c226a6d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T03:55:47,316 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/.tmp/table/7273fdd4f94b443b850416901951bbbf as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/table/7273fdd4f94b443b850416901951bbbf 2024-11-14T03:55:47,324 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/table/7273fdd4f94b443b850416901951bbbf, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T03:55:47,326 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 155ms, sequenceid=11, compaction requested=false 2024-11-14T03:55:47,333 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T03:55:47,334 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:55:47,334 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:55:47,334 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556547170Running coprocessor pre-close hooks at 1731556547170Disabling compacts and flushes for region at 1731556547170Disabling writes for close at 1731556547171 (+1 ms)Obtaining lock to block concurrent updates at 1731556547171Preparing flush snapshotting stores in 1588230740 at 1731556547171Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731556547172 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731556547173 (+1 ms)Flushing 1588230740/info: creating writer at 1731556547173Flushing 1588230740/info: appending metadata at 1731556547198 (+25 ms)Flushing 1588230740/info: closing flushed file at 1731556547198Flushing 1588230740/ns: creating writer at 1731556547220 (+22 ms)Flushing 1588230740/ns: appending metadata at 1731556547239 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731556547239Flushing 1588230740/table: creating writer at 1731556547260 (+21 ms)Flushing 1588230740/table: appending metadata at 1731556547281 (+21 ms)Flushing 1588230740/table: closing flushed file at 1731556547282 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@144a91c0: reopening flushed file at 1731556547296 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@482b5c9: reopening flushed file at 1731556547306 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ed48647: reopening flushed file at 1731556547315 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 155ms, sequenceid=11, compaction requested=false at 
1731556547326 (+11 ms)Writing region close event to WAL at 1731556547328 (+2 ms)Running coprocessor post-close hooks at 1731556547334 (+6 ms)Closed at 1731556547334 2024-11-14T03:55:47,335 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T03:55:47,371 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(976): stopping server 6a989be77d39,45463,1731556451536; all regions closed. 2024-11-14T03:55:47,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,373 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,373 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,373 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,373 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741834_1010 (size=3066) 2024-11-14T03:55:47,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741834_1010 (size=3066) 2024-11-14T03:55:47,380 DEBUG [RS:0;6a989be77d39:45463 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs 2024-11-14T03:55:47,380 INFO [RS:0;6a989be77d39:45463 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C45463%2C1731556451536.meta:.meta(num 1731556453999) 2024-11-14T03:55:47,381 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,381 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,381 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,381 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,381 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741847_1023 (size=12695) 2024-11-14T03:55:47,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741847_1023 (size=12695) 2024-11-14T03:55:47,387 DEBUG [RS:0;6a989be77d39:45463 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/oldWALs 2024-11-14T03:55:47,387 INFO [RS:0;6a989be77d39:45463 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C45463%2C1731556451536:(num 1731556527106) 2024-11-14T03:55:47,387 DEBUG [RS:0;6a989be77d39:45463 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:47,387 INFO [RS:0;6a989be77d39:45463 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:55:47,387 INFO [RS:0;6a989be77d39:45463 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:55:47,388 INFO [RS:0;6a989be77d39:45463 {}] hbase.ChoreService(370): Chore service for: regionserver/6a989be77d39:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T03:55:47,388 INFO [RS:0;6a989be77d39:45463 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:55:47,388 INFO 
[regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T03:55:47,388 INFO [RS:0;6a989be77d39:45463 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45463 2024-11-14T03:55:47,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:55:47,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6a989be77d39,45463,1731556451536 2024-11-14T03:55:47,397 INFO [RS:0;6a989be77d39:45463 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:55:47,398 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6a989be77d39,45463,1731556451536] 2024-11-14T03:55:47,400 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6a989be77d39,45463,1731556451536 already deleted, retry=false 2024-11-14T03:55:47,400 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6a989be77d39,45463,1731556451536 expired; onlineServers=0 2024-11-14T03:55:47,400 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6a989be77d39,44031,1731556450259' ***** 2024-11-14T03:55:47,400 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T03:55:47,401 INFO [M:0;6a989be77d39:44031 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:55:47,401 INFO [M:0;6a989be77d39:44031 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:55:47,401 DEBUG [M:0;6a989be77d39:44031 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T03:55:47,401 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T03:55:47,401 DEBUG [M:0;6a989be77d39:44031 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T03:55:47,401 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556453204 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556453204,5,FailOnTimeoutGroup] 2024-11-14T03:55:47,401 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556453203 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556453203,5,FailOnTimeoutGroup] 2024-11-14T03:55:47,401 INFO [M:0;6a989be77d39:44031 {}] hbase.ChoreService(370): Chore service for: master/6a989be77d39:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T03:55:47,401 INFO [M:0;6a989be77d39:44031 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:55:47,402 DEBUG [M:0;6a989be77d39:44031 {}] master.HMaster(1795): Stopping service threads 2024-11-14T03:55:47,402 INFO [M:0;6a989be77d39:44031 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T03:55:47,402 INFO [M:0;6a989be77d39:44031 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:55:47,402 INFO [M:0;6a989be77d39:44031 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T03:55:47,403 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T03:55:47,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T03:55:47,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:47,403 DEBUG [M:0;6a989be77d39:44031 {}] zookeeper.ZKUtil(347): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T03:55:47,403 WARN [M:0;6a989be77d39:44031 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T03:55:47,404 INFO [M:0;6a989be77d39:44031 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/.lastflushedseqids 2024-11-14T03:55:47,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741854_1030 (size=130) 2024-11-14T03:55:47,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741854_1030 (size=130) 2024-11-14T03:55:47,421 INFO [M:0;6a989be77d39:44031 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T03:55:47,421 INFO [M:0;6a989be77d39:44031 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T03:55:47,421 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:55:47,422 INFO [M:0;6a989be77d39:44031 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:47,422 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:47,422 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:55:47,422 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:47,422 INFO [M:0;6a989be77d39:44031 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-14T03:55:47,445 DEBUG [M:0;6a989be77d39:44031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8d667d9808b437382fe9de9fbabbcca is 82, key is hbase:meta,,1/info:regioninfo/1731556454106/Put/seqid=0 2024-11-14T03:55:47,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741855_1031 (size=5672) 2024-11-14T03:55:47,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741855_1031 (size=5672) 2024-11-14T03:55:47,456 INFO [M:0;6a989be77d39:44031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8d667d9808b437382fe9de9fbabbcca 2024-11-14T03:55:47,481 DEBUG [M:0;6a989be77d39:44031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3590537aa66347d58a87380fdde53452 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731556455591/Put/seqid=0 2024-11-14T03:55:47,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741856_1032 (size=6248) 2024-11-14T03:55:47,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741856_1032 (size=6248) 2024-11-14T03:55:47,489 INFO [M:0;6a989be77d39:44031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3590537aa66347d58a87380fdde53452 2024-11-14T03:55:47,489 INFO [regionserver/6a989be77d39:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:55:47,495 INFO [M:0;6a989be77d39:44031 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3590537aa66347d58a87380fdde53452 2024-11-14T03:55:47,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:55:47,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45463-0x100ef618fdb0001, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:55:47,501 INFO [RS:0;6a989be77d39:45463 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:55:47,501 INFO [RS:0;6a989be77d39:45463 {}] regionserver.HRegionServer(1031): Exiting; stopping=6a989be77d39,45463,1731556451536; zookeeper connection closed. 2024-11-14T03:55:47,501 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3df3f52a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3df3f52a 2024-11-14T03:55:47,502 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T03:55:47,512 DEBUG [M:0;6a989be77d39:44031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/736e3fcc6f1f483caa7c7e970bdadac7 is 69, key is 6a989be77d39,45463,1731556451536/rs:state/1731556453358/Put/seqid=0 2024-11-14T03:55:47,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741857_1033 (size=5156) 2024-11-14T03:55:47,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741857_1033 (size=5156) 2024-11-14T03:55:47,520 INFO [M:0;6a989be77d39:44031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/736e3fcc6f1f483caa7c7e970bdadac7 2024-11-14T03:55:47,545 DEBUG [M:0;6a989be77d39:44031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aa3e78fc7dbc4fbb8db64039e087f0a8 is 52, key is load_balancer_on/state:d/1731556454498/Put/seqid=0 2024-11-14T03:55:47,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741858_1034 (size=5056) 2024-11-14T03:55:47,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741858_1034 (size=5056) 2024-11-14T03:55:47,560 INFO [M:0;6a989be77d39:44031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aa3e78fc7dbc4fbb8db64039e087f0a8 2024-11-14T03:55:47,571 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8d667d9808b437382fe9de9fbabbcca as 
hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f8d667d9808b437382fe9de9fbabbcca 2024-11-14T03:55:47,578 INFO [M:0;6a989be77d39:44031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f8d667d9808b437382fe9de9fbabbcca, entries=8, sequenceid=59, filesize=5.5 K 2024-11-14T03:55:47,579 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3590537aa66347d58a87380fdde53452 as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3590537aa66347d58a87380fdde53452 2024-11-14T03:55:47,587 INFO [M:0;6a989be77d39:44031 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3590537aa66347d58a87380fdde53452 2024-11-14T03:55:47,587 INFO [M:0;6a989be77d39:44031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3590537aa66347d58a87380fdde53452, entries=6, sequenceid=59, filesize=6.1 K 2024-11-14T03:55:47,589 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/736e3fcc6f1f483caa7c7e970bdadac7 as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/736e3fcc6f1f483caa7c7e970bdadac7 2024-11-14T03:55:47,596 INFO [M:0;6a989be77d39:44031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/736e3fcc6f1f483caa7c7e970bdadac7, entries=1, sequenceid=59, filesize=5.0 K 2024-11-14T03:55:47,597 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aa3e78fc7dbc4fbb8db64039e087f0a8 as hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aa3e78fc7dbc4fbb8db64039e087f0a8 2024-11-14T03:55:47,604 INFO [M:0;6a989be77d39:44031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aa3e78fc7dbc4fbb8db64039e087f0a8, entries=1, sequenceid=59, filesize=4.9 K 2024-11-14T03:55:47,607 INFO [M:0;6a989be77d39:44031 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 185ms, sequenceid=59, compaction requested=false 2024-11-14T03:55:47,609 INFO [M:0;6a989be77d39:44031 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T03:55:47,609 DEBUG [M:0;6a989be77d39:44031 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556547421Disabling compacts and flushes for region at 1731556547421Disabling writes for close at 1731556547422 (+1 ms)Obtaining lock to block concurrent updates at 1731556547422Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731556547422Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731556547426 (+4 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731556547427 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731556547427Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731556547444 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731556547444Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731556547463 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731556547480 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731556547480Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731556547495 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731556547511 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731556547511Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731556547528 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731556547544 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731556547544Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78193bab: reopening flushed file at 1731556547570 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@785ecb60: reopening flushed file at 1731556547578 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ab7923a: reopening flushed file at 1731556547588 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fee4d6: reopening flushed file at 1731556547596 (+8 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 185ms, sequenceid=59, compaction requested=false at 1731556547607 (+11 ms)Writing region close event to WAL at 1731556547609 (+2 ms)Closed at 1731556547609 2024-11-14T03:55:47,610 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,610 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,611 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,611 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,611 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:47,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34101 is added to blk_1073741830_1006 (size=27985) 2024-11-14T03:55:47,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34925 is added to blk_1073741830_1006 (size=27985) 2024-11-14T03:55:47,614 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T03:55:47,615 INFO [M:0;6a989be77d39:44031 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T03:55:47,615 INFO [M:0;6a989be77d39:44031 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44031 2024-11-14T03:55:47,615 INFO [M:0;6a989be77d39:44031 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:55:47,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:55:47,717 INFO [M:0;6a989be77d39:44031 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:55:47,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44031-0x100ef618fdb0000, quorum=127.0.0.1:58995, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:55:47,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1467625d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:55:47,723 WARN [BP-251123023-172.17.0.2-1731556446590 heartbeating to localhost/127.0.0.1:45187 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-251123023-172.17.0.2-1731556446590 (Datanode Uuid 730c4d3f-4b73-4006-abf0-3d6ba2d70bf3) service to localhost/127.0.0.1:45187 2024-11-14T03:55:47,724 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:55:47,725 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:55:47,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:55:47,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.log.dir/,STOPPED} 2024-11-14T03:55:47,725 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/data/data3/current/BP-251123023-172.17.0.2-1731556446590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:55:47,725 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/data/data4/current/BP-251123023-172.17.0.2-1731556446590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:55:47,729 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:55:47,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.w.WebAppContext@6c2fdbac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:55:47,732 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:55:47,732 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:55:47,732 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:55:47,732 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.log.dir/,STOPPED} 2024-11-14T03:55:47,734 WARN [BP-251123023-172.17.0.2-1731556446590 heartbeating to localhost/127.0.0.1:45187 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:55:47,734 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T03:55:47,734 WARN [BP-251123023-172.17.0.2-1731556446590 heartbeating to localhost/127.0.0.1:45187 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-251123023-172.17.0.2-1731556446590 (Datanode Uuid 8a269981-82b0-44e3-92f2-8a000f970dc0) service to localhost/127.0.0.1:45187 2024-11-14T03:55:47,734 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:55:47,734 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/data/data1/current/BP-251123023-172.17.0.2-1731556446590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:55:47,735 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/cluster_abdf2db3-3dc8-7945-9ecd-7fcbef81e32e/data/data2/current/BP-251123023-172.17.0.2-1731556446590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:55:47,735 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:55:47,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:55:47,745 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:55:47,745 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:55:47,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:55:47,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.log.dir/,STOPPED} 2024-11-14T03:55:47,754 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T03:55:47,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T03:55:47,796 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=79 (was 12) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/6a989be77d39:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45187 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45187 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:45187 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45187 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/6a989be77d39:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45187 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45187 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45187 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@5f8f8226 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/6a989be77d39:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45187 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=395 (was 434), ProcessCount=11 (was 11), AvailableMemoryMB=5397 (was 6757) 2024-11-14T03:55:47,803 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=80, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=395, ProcessCount=11, AvailableMemoryMB=5396 2024-11-14T03:55:47,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.log.dir so I do NOT create it in target/test-data/27eda811-622c-b0d9-0449-e641d141fd93 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b120cc09-cc54-5e13-0974-dc75ac68d74a/hadoop.tmp.dir so I do NOT create it in target/test-data/27eda811-622c-b0d9-0449-e641d141fd93 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382, deleteOnExit=true 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/test.cache.data in system properties and HBase conf 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.log.dir in system properties and HBase conf 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T03:55:47,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T03:55:47,805 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T03:55:47,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:55:47,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:55:47,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T03:55:47,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:55:47,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T03:55:47,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T03:55:47,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:55:47,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:55:47,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T03:55:47,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/nfs.dump.dir in system properties and HBase conf 2024-11-14T03:55:47,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/java.io.tmpdir in system properties and HBase conf 2024-11-14T03:55:47,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:55:47,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T03:55:47,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T03:55:47,821 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:55:47,899 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:55:47,905 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:55:47,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:55:47,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:55:47,907 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:55:47,908 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:55:47,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c443180{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:55:47,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10885b70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:55:48,029 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@245f97f3{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/java.io.tmpdir/jetty-localhost-36499-hadoop-hdfs-3_4_1-tests_jar-_-any-2818426643804327865/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:55:48,030 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@219d080{HTTP/1.1, (http/1.1)}{localhost:36499} 2024-11-14T03:55:48,030 INFO [Time-limited test {}] server.Server(415): Started @104042ms 2024-11-14T03:55:48,046 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:55:48,116 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:55:48,120 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:55:48,121 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:55:48,121 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:55:48,121 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:55:48,122 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b5aedfa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:55:48,122 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d281952{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:55:48,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7bd218c7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/java.io.tmpdir/jetty-localhost-35817-hadoop-hdfs-3_4_1-tests_jar-_-any-15402573436501867772/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:55:48,249 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a9b9566{HTTP/1.1, (http/1.1)}{localhost:35817} 2024-11-14T03:55:48,249 INFO [Time-limited test {}] server.Server(415): Started @104261ms 2024-11-14T03:55:48,252 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:55:48,299 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:55:48,304 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:55:48,305 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:55:48,305 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:55:48,305 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:55:48,305 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30ebe7e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:55:48,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@254e0164{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:55:48,375 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/data/data1/current/BP-283798774-172.17.0.2-1731556547840/current, will proceed with Du for space computation calculation, 2024-11-14T03:55:48,376 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/data/data2/current/BP-283798774-172.17.0.2-1731556547840/current, will proceed with Du for space computation calculation, 2024-11-14T03:55:48,405 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:55:48,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58bcdd54f5e97664 with lease ID 0xd2b6599bfe7420cb: Processing first storage report for DS-238b264c-cfe6-4a24-b61e-4c4dbb825816 from datanode DatanodeRegistration(127.0.0.1:46337, datanodeUuid=6dfe2db9-c1d3-4f69-a179-bed7c58ca055, infoPort=43491, infoSecurePort=0, ipcPort=40561, storageInfo=lv=-57;cid=testClusterID;nsid=1892841783;c=1731556547840) 2024-11-14T03:55:48,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58bcdd54f5e97664 with lease ID 0xd2b6599bfe7420cb: from storage DS-238b264c-cfe6-4a24-b61e-4c4dbb825816 node DatanodeRegistration(127.0.0.1:46337, datanodeUuid=6dfe2db9-c1d3-4f69-a179-bed7c58ca055, infoPort=43491, infoSecurePort=0, ipcPort=40561, storageInfo=lv=-57;cid=testClusterID;nsid=1892841783;c=1731556547840), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:55:48,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58bcdd54f5e97664 with lease ID 0xd2b6599bfe7420cb: Processing first storage report for DS-894e015c-2a6d-4f75-a7e3-dd38c8367cb1 from datanode DatanodeRegistration(127.0.0.1:46337, datanodeUuid=6dfe2db9-c1d3-4f69-a179-bed7c58ca055, infoPort=43491, infoSecurePort=0, ipcPort=40561, storageInfo=lv=-57;cid=testClusterID;nsid=1892841783;c=1731556547840) 2024-11-14T03:55:48,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58bcdd54f5e97664 with lease ID 0xd2b6599bfe7420cb: from storage DS-894e015c-2a6d-4f75-a7e3-dd38c8367cb1 node DatanodeRegistration(127.0.0.1:46337, datanodeUuid=6dfe2db9-c1d3-4f69-a179-bed7c58ca055, infoPort=43491, infoSecurePort=0, ipcPort=40561, storageInfo=lv=-57;cid=testClusterID;nsid=1892841783;c=1731556547840), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:55:48,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@156b26b5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/java.io.tmpdir/jetty-localhost-42937-hadoop-hdfs-3_4_1-tests_jar-_-any-16439748096375814960/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:55:48,443 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3dd99d02{HTTP/1.1, (http/1.1)}{localhost:42937} 2024-11-14T03:55:48,443 INFO [Time-limited test {}] server.Server(415): Started @104456ms 2024-11-14T03:55:48,445 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
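
The entries above come from HBaseTestingUtil bringing up a throwaway HDFS/Jetty/ZooKeeper stack for the test run. A minimal sketch of the test-side code that drives this kind of startup, assuming the public HBaseTestingUtility API from released HBase lines (the HBaseTestingUtil seen above is the equivalent class on the master branch); the class name and test body are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    // Illustrative sketch only: drives the same kind of startup these log entries record.
    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        Configuration conf = util.getConfiguration(); // test dirs, ports, etc. end up in here
        util.startMiniCluster();                      // starts mini DFS, ZooKeeper, a master and a region server
        try {
          // a real test would create tables and exercise the cluster here
        } finally {
          util.shutdownMiniCluster();                 // tears the whole stack down again
        }
      }
    }

startMiniCluster() is roughly what produces the cascade of DFS, Jetty and ZooKeeper startup messages seen in this section, and shutdownMiniCluster() reverses it at the end of the test.
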
2024-11-14T03:55:48,546 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/data/data3/current/BP-283798774-172.17.0.2-1731556547840/current, will proceed with Du for space computation calculation, 2024-11-14T03:55:48,546 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/data/data4/current/BP-283798774-172.17.0.2-1731556547840/current, will proceed with Du for space computation calculation, 2024-11-14T03:55:48,564 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T03:55:48,567 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ba8c6081b5f6574 with lease ID 0xd2b6599bfe7420cc: Processing first storage report for DS-702c06bb-50e4-4160-a250-2a4a6e357aa8 from datanode DatanodeRegistration(127.0.0.1:35803, datanodeUuid=02ac9d1b-e712-4487-ac30-40cec3d33cce, infoPort=34753, infoSecurePort=0, ipcPort=37083, storageInfo=lv=-57;cid=testClusterID;nsid=1892841783;c=1731556547840) 2024-11-14T03:55:48,568 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ba8c6081b5f6574 with lease ID 0xd2b6599bfe7420cc: from storage DS-702c06bb-50e4-4160-a250-2a4a6e357aa8 node DatanodeRegistration(127.0.0.1:35803, datanodeUuid=02ac9d1b-e712-4487-ac30-40cec3d33cce, infoPort=34753, infoSecurePort=0, ipcPort=37083, storageInfo=lv=-57;cid=testClusterID;nsid=1892841783;c=1731556547840), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:55:48,568 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ba8c6081b5f6574 with lease ID 0xd2b6599bfe7420cc: Processing first storage report for DS-28ee0e6d-2c0a-46f0-b0fd-33819159fe8f from datanode DatanodeRegistration(127.0.0.1:35803, datanodeUuid=02ac9d1b-e712-4487-ac30-40cec3d33cce, infoPort=34753, infoSecurePort=0, ipcPort=37083, storageInfo=lv=-57;cid=testClusterID;nsid=1892841783;c=1731556547840) 2024-11-14T03:55:48,568 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ba8c6081b5f6574 with lease ID 0xd2b6599bfe7420cc: from storage DS-28ee0e6d-2c0a-46f0-b0fd-33819159fe8f node DatanodeRegistration(127.0.0.1:35803, datanodeUuid=02ac9d1b-e712-4487-ac30-40cec3d33cce, infoPort=34753, infoSecurePort=0, ipcPort=37083, storageInfo=lv=-57;cid=testClusterID;nsid=1892841783;c=1731556547840), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:55:48,580 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93 2024-11-14T03:55:48,583 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/zookeeper_0, clientPort=57134, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T03:55:48,584 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57134 2024-11-14T03:55:48,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:48,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:48,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:55:48,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:55:48,612 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc with version=8 2024-11-14T03:55:48,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/hbase-staging 2024-11-14T03:55:48,615 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:55:48,615 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:48,615 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:48,615 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:55:48,615 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:48,616 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:55:48,616 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T03:55:48,616 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:55:48,617 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45611 2024-11-14T03:55:48,619 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45611 connecting to ZooKeeper ensemble=127.0.0.1:57134 2024-11-14T03:55:48,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:456110x0, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:55:48,625 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45611-0x100ef6314910000 connected 2024-11-14T03:55:48,642 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:48,643 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:48,646 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:55:48,646 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc, hbase.cluster.distributed=false 2024-11-14T03:55:48,648 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:55:48,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45611 2024-11-14T03:55:48,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45611 2024-11-14T03:55:48,650 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45611 2024-11-14T03:55:48,650 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45611 2024-11-14T03:55:48,650 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45611 2024-11-14T03:55:48,668 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:55:48,668 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:48,668 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:48,668 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:55:48,668 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:48,668 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:55:48,668 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T03:55:48,668 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:55:48,669 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39909 2024-11-14T03:55:48,670 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39909 connecting to ZooKeeper ensemble=127.0.0.1:57134 2024-11-14T03:55:48,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:48,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:48,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399090x0, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:55:48,679 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:399090x0, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:55:48,679 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39909-0x100ef6314910001 connected 2024-11-14T03:55:48,679 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T03:55:48,681 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T03:55:48,681 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T03:55:48,682 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:55:48,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39909 2024-11-14T03:55:48,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39909 2024-11-14T03:55:48,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39909 2024-11-14T03:55:48,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39909 2024-11-14T03:55:48,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39909 2024-11-14T03:55:48,702 
DEBUG [M:0;6a989be77d39:45611 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6a989be77d39:45611 2024-11-14T03:55:48,702 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6a989be77d39,45611,1731556548614 2024-11-14T03:55:48,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:55:48,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:55:48,705 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6a989be77d39,45611,1731556548614 2024-11-14T03:55:48,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T03:55:48,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:48,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:48,707 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T03:55:48,708 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6a989be77d39,45611,1731556548614 from backup master directory 2024-11-14T03:55:48,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6a989be77d39,45611,1731556548614 2024-11-14T03:55:48,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:55:48,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:55:48,710 WARN [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
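
The active-master registration above is coordinated through ZooKeeper watches: ZKUtil sets watchers on znodes such as /hbase/master and /hbase/backup-masters and reacts to the NodeCreated/NodeDeleted events logged by ZKWatcher. A rough sketch of the underlying primitive using the plain Apache ZooKeeper client, reusing the ensemble address and znode path from the log; HBase itself goes through its ZKWatcher/RecoverableZooKeeper wrappers rather than this raw form:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch of an exists()-watch like the ones ZKUtil sets on /hbase/master.
    public class MasterWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57134", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // The watch fires once when /hbase/master is created or deleted; HBase re-registers
        // its watcher after every event, which is why the ZKWatcher lines above repeat.
        zk.exists("/hbase/master", event -> System.out.println("event: " + event.getType()));
        // A real caller keeps the session open; it is closed here only to end the sketch.
        zk.close();
      }
    }
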
2024-11-14T03:55:48,710 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6a989be77d39,45611,1731556548614 2024-11-14T03:55:48,716 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/hbase.id] with ID: c73484c4-4323-4ed9-9a62-871f541825ad 2024-11-14T03:55:48,716 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/.tmp/hbase.id 2024-11-14T03:55:48,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:55:48,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:55:48,726 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/.tmp/hbase.id]:[hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/hbase.id] 2024-11-14T03:55:48,743 INFO [master/6a989be77d39:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:48,744 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T03:55:48,745 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
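
The FSUtils entries above spell out the publish pattern for hbase.id: write the file under a temporary path, then rename it into its final location so readers never observe a partial file. A small sketch of that idiom with the Hadoop FileSystem API, assuming a Configuration already pointing at the test cluster's NameNode; the paths are placeholders and the ID value is simply the one reported above:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of the temp-write-then-rename pattern used for the cluster ID file.
    public class ClusterIdPublishSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();     // would carry the fs.defaultFS of the test cluster
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/hbase/.tmp/hbase.id");  // placeholder paths
        Path dst = new Path("/hbase/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("c73484c4-4323-4ed9-9a62-871f541825ad".getBytes(StandardCharsets.UTF_8));
        }
        // rename() is atomic on HDFS, so readers only ever see a complete file
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }
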
2024-11-14T03:55:48,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:48,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:48,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:55:48,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:55:48,790 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:55:48,791 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T03:55:48,794 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:55:48,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:55:48,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:55:48,829 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store 2024-11-14T03:55:48,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:55:48,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:55:49,241 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:49,242 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:55:49,242 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:49,242 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:49,242 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:55:49,242 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:49,242 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
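
The table descriptor dumped above maps directly onto the HBase 2+ builder API: each {NAME => ...} block is a ColumnFamilyDescriptor with the listed attributes. A hedged sketch that reconstructs a family equivalent to the logged 'info' family on a hypothetical table named "demo"; only the attribute values are taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: a descriptor matching the logged 'info' family, on a hypothetical table.
    public class DescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .build())
            .build();
        System.out.println(td);
      }
    }

The other families in the dump ('proc', 'rs', 'state') differ only in VERSIONS, encoding, bloom type and block size, so they would follow the same pattern.
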
2024-11-14T03:55:49,242 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556549242Disabling compacts and flushes for region at 1731556549242Disabling writes for close at 1731556549242Writing region close event to WAL at 1731556549242Closed at 1731556549242 2024-11-14T03:55:49,243 WARN [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/.initializing 2024-11-14T03:55:49,243 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/WALs/6a989be77d39,45611,1731556548614 2024-11-14T03:55:49,247 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C45611%2C1731556548614, suffix=, logDir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/WALs/6a989be77d39,45611,1731556548614, archiveDir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/oldWALs, maxLogs=10 2024-11-14T03:55:49,248 INFO [master/6a989be77d39:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C45611%2C1731556548614.1731556549248 2024-11-14T03:55:49,266 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/WALs/6a989be77d39,45611,1731556548614/6a989be77d39%2C45611%2C1731556548614.1731556549248 2024-11-14T03:55:49,270 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34753:34753),(127.0.0.1/127.0.0.1:43491:43491)] 2024-11-14T03:55:49,277 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:55:49,277 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:49,277 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,277 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T03:55:49,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:49,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:49,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T03:55:49,284 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:49,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:55:49,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,287 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T03:55:49,288 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:49,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:55:49,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T03:55:49,290 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:49,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:55:49,291 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,291 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,292 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,293 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,293 DEBUG [master/6a989be77d39:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,294 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T03:55:49,295 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:49,298 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:55:49,299 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=840540, jitterRate=0.06880217790603638}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T03:55:49,300 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731556549277Initializing all the Stores at 1731556549279 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556549279Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556549279Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556549279Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556549279Cleaning up temporary data from old regions at 1731556549294 (+15 ms)Region opened successfully at 1731556549300 (+6 ms) 2024-11-14T03:55:49,301 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T03:55:49,305 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40f59fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:55:49,306 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T03:55:49,306 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T03:55:49,306 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T03:55:49,307 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T03:55:49,307 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T03:55:49,308 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T03:55:49,308 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T03:55:49,319 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T03:55:49,320 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T03:55:49,322 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T03:55:49,322 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T03:55:49,323 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T03:55:49,324 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T03:55:49,325 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T03:55:49,327 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T03:55:49,329 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T03:55:49,330 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T03:55:49,331 DEBUG 
[master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T03:55:49,333 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T03:55:49,335 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T03:55:49,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:55:49,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:55:49,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:49,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:49,338 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6a989be77d39,45611,1731556548614, sessionid=0x100ef6314910000, setting cluster-up flag (Was=false) 2024-11-14T03:55:49,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:49,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:49,348 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T03:55:49,350 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,45611,1731556548614 2024-11-14T03:55:49,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:49,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:49,360 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T03:55:49,361 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,45611,1731556548614 2024-11-14T03:55:49,362 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T03:55:49,365 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T03:55:49,365 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T03:55:49,365 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T03:55:49,365 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6a989be77d39,45611,1731556548614 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T03:55:49,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:55:49,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:55:49,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:55:49,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:55:49,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6a989be77d39:0, corePoolSize=10, maxPoolSize=10 2024-11-14T03:55:49,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:55:49,367 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T03:55:49,371 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731556579371 2024-11-14T03:55:49,371 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T03:55:49,371 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T03:55:49,371 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T03:55:49,371 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T03:55:49,371 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T03:55:49,371 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T03:55:49,371 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,372 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T03:55:49,372 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T03:55:49,372 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T03:55:49,372 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:55:49,372 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T03:55:49,372 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T03:55:49,372 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T03:55:49,373 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556549372,5,FailOnTimeoutGroup] 2024-11-14T03:55:49,373 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556549373,5,FailOnTimeoutGroup] 2024-11-14T03:55:49,373 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,373 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T03:55:49,373 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,373 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,374 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:49,374 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T03:55:49,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:55:49,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:55:49,388 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T03:55:49,388 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc 2024-11-14T03:55:49,392 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(746): ClusterId : c73484c4-4323-4ed9-9a62-871f541825ad 2024-11-14T03:55:49,392 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T03:55:49,396 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T03:55:49,396 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T03:55:49,399 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T03:55:49,399 DEBUG [RS:0;6a989be77d39:39909 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30539560, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:55:49,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:55:49,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:55:49,401 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:49,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:55:49,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:55:49,404 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:49,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:49,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:55:49,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:55:49,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:49,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:49,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:55:49,408 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:55:49,408 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:49,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:49,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:55:49,410 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:55:49,411 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:49,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:49,411 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:55:49,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740 2024-11-14T03:55:49,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740 2024-11-14T03:55:49,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:55:49,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:55:49,416 DEBUG [RS:0;6a989be77d39:39909 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6a989be77d39:39909 2024-11-14T03:55:49,416 INFO [RS:0;6a989be77d39:39909 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T03:55:49,416 INFO [RS:0;6a989be77d39:39909 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T03:55:49,416 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:55:49,416 DEBUG [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(832): About to register with Master. 
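
The table descriptor printed above for hbase:meta lists per-family attributes (VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8 KB). As a minimal sketch only, the same attributes can be expressed for an ordinary table with the public hbase-client builder API; the table name "demo" is hypothetical and this is not the code path InitMetaProcedure itself runs.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) {
        // Family attributes mirroring what the log prints for hbase:meta's 'info' family.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        // "demo" is a made-up table name; hbase:meta itself is created by the master, not by clients.
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .build();

        System.out.println(table);
      }
    }
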
2024-11-14T03:55:49,417 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,45611,1731556548614 with port=39909, startcode=1731556548668 2024-11-14T03:55:49,418 DEBUG [RS:0;6a989be77d39:39909 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T03:55:49,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:55:49,421 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54505, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T03:55:49,422 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45611 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6a989be77d39,39909,1731556548668 2024-11-14T03:55:49,422 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:55:49,422 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45611 {}] master.ServerManager(517): Registering regionserver=6a989be77d39,39909,1731556548668 2024-11-14T03:55:49,423 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808924, jitterRate=0.028600573539733887}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:55:49,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731556549401Initializing all the Stores at 1731556549402 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556549402Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556549402Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556549402Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556549402Cleaning up temporary data from old regions at 1731556549415 (+13 ms)Region opened successfully at 1731556549424 (+9 ms) 2024-11-14T03:55:49,424 DEBUG [PEWorker-1 
{}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:55:49,424 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:55:49,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:55:49,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:55:49,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:55:49,424 DEBUG [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc 2024-11-14T03:55:49,424 DEBUG [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35605 2024-11-14T03:55:49,424 DEBUG [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T03:55:49,425 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:55:49,425 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556549424Disabling compacts and flushes for region at 1731556549424Disabling writes for close at 1731556549424Writing region close event to WAL at 1731556549425 (+1 ms)Closed at 1731556549425 2024-11-14T03:55:49,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:55:49,427 DEBUG [RS:0;6a989be77d39:39909 {}] zookeeper.ZKUtil(111): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6a989be77d39,39909,1731556548668 2024-11-14T03:55:49,427 WARN [RS:0;6a989be77d39:39909 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
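
The "Set watcher on existing znode=/hbase/rs/..." and "Received ZooKeeper Event" lines above come from HBase's ZKWatcher/ZKUtil wrappers. A minimal sketch of the underlying watch registration with the plain Apache ZooKeeper client, assuming a hypothetical ensemble at 127.0.0.1:2181 rather than the test's 127.0.0.1:57134; HBase adds retry and connection management on top of this.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical quorum address; the test above uses 127.0.0.1:57134.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event ->
            // Session-level events (SyncConnected, Expired, ...) arrive here.
            System.out.println("session event: " + event.getState()));

        Watcher nodeWatcher = (WatchedEvent event) ->
            // NodeCreated / NodeDeleted / NodeChildrenChanged, as seen in the log.
            System.out.println("znode event: " + event.getType() + " on " + event.getPath());

        // Register a one-shot watch on a znode, whether or not it exists yet.
        zk.exists("/hbase/rs/example-server", nodeWatcher);
      }
    }
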
2024-11-14T03:55:49,427 INFO [RS:0;6a989be77d39:39909 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:55:49,427 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:55:49,428 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T03:55:49,428 DEBUG [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/WALs/6a989be77d39,39909,1731556548668 2024-11-14T03:55:49,428 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T03:55:49,430 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:55:49,432 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T03:55:49,438 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6a989be77d39,39909,1731556548668] 2024-11-14T03:55:49,448 INFO [RS:0;6a989be77d39:39909 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T03:55:49,453 INFO [RS:0;6a989be77d39:39909 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T03:55:49,454 INFO [RS:0;6a989be77d39:39909 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T03:55:49,454 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,455 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T03:55:49,456 INFO [RS:0;6a989be77d39:39909 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T03:55:49,457 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
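
Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above is a periodic background task registered with HBase's ChoreService. A rough JDK analogy of the period/unit idea only; the real ChoreService adds missed-run accounting that this sketch does not attempt.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
      public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

        // Analogous to "name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS".
        chores.scheduleAtFixedRate(
            () -> System.out.println("throughput tuning tick"),
            0, 60_000, TimeUnit.MILLISECONDS);

        // Analogous to "name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS".
        chores.scheduleAtFixedRate(
            () -> System.out.println("compacted HFiles cleanup tick"),
            0, 120_000, TimeUnit.MILLISECONDS);
      }
    }
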
2024-11-14T03:55:49,457 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,457 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,457 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,457 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,457 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,457 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:55:49,457 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,457 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,458 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,458 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,458 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,458 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:49,458 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:55:49,458 DEBUG [RS:0;6a989be77d39:39909 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:55:49,462 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,462 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,462 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,463 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
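
The "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." lines above describe bounded handler pools, one per event type. A minimal JDK sketch of the same core/max sizing, purely to illustrate the parameters; HBase's internal ExecutorService also tracks per-event-type queues and metrics that are omitted here.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class HandlerPoolSketch {
      public static void main(String[] args) {
        // Mirrors "RS_OPEN_REGION ... corePoolSize=1, maxPoolSize=1": a single-threaded handler pool.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

        // Mirrors "RS_SNAPSHOT_OPERATIONS ... corePoolSize=3, maxPoolSize=3".
        ThreadPoolExecutor snapshotPool = new ThreadPoolExecutor(
            3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

        openRegionPool.execute(() -> System.out.println("open region task"));
        snapshotPool.execute(() -> System.out.println("snapshot task"));

        openRegionPool.shutdown();
        snapshotPool.shutdown();
      }
    }
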
2024-11-14T03:55:49,463 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,463 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,39909,1731556548668-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:55:49,489 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T03:55:49,489 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,39909,1731556548668-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,489 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,489 INFO [RS:0;6a989be77d39:39909 {}] regionserver.Replication(171): 6a989be77d39,39909,1731556548668 started 2024-11-14T03:55:49,515 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:49,525 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(1482): Serving as 6a989be77d39,39909,1731556548668, RpcServer on 6a989be77d39/172.17.0.2:39909, sessionid=0x100ef6314910001 2024-11-14T03:55:49,525 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T03:55:49,525 DEBUG [RS:0;6a989be77d39:39909 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6a989be77d39,39909,1731556548668 2024-11-14T03:55:49,525 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,39909,1731556548668' 2024-11-14T03:55:49,525 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T03:55:49,526 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T03:55:49,527 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T03:55:49,527 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T03:55:49,527 DEBUG [RS:0;6a989be77d39:39909 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6a989be77d39,39909,1731556548668 2024-11-14T03:55:49,527 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,39909,1731556548668' 2024-11-14T03:55:49,527 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T03:55:49,528 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T03:55:49,529 DEBUG [RS:0;6a989be77d39:39909 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T03:55:49,529 INFO [RS:0;6a989be77d39:39909 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T03:55:49,529 INFO [RS:0;6a989be77d39:39909 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T03:55:49,582 WARN [6a989be77d39:45611 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T03:55:49,631 INFO [RS:0;6a989be77d39:39909 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C39909%2C1731556548668, suffix=, logDir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/WALs/6a989be77d39,39909,1731556548668, archiveDir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/oldWALs, maxLogs=32 2024-11-14T03:55:49,634 INFO [RS:0;6a989be77d39:39909 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C39909%2C1731556548668.1731556549633 2024-11-14T03:55:49,641 INFO [RS:0;6a989be77d39:39909 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/WALs/6a989be77d39,39909,1731556548668/6a989be77d39%2C39909%2C1731556548668.1731556549633 2024-11-14T03:55:49,644 DEBUG [RS:0;6a989be77d39:39909 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34753:34753),(127.0.0.1/127.0.0.1:43491:43491)] 2024-11-14T03:55:49,832 DEBUG [6a989be77d39:45611 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T03:55:49,833 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6a989be77d39,39909,1731556548668 2024-11-14T03:55:49,835 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,39909,1731556548668, state=OPENING 2024-11-14T03:55:49,837 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T03:55:49,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:49,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:49,840 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:55:49,840 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:55:49,840 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:55:49,840 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,39909,1731556548668}] 2024-11-14T03:55:49,994 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T03:55:49,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36773, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T03:55:50,001 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T03:55:50,001 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:55:50,004 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C39909%2C1731556548668.meta, suffix=.meta, logDir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/WALs/6a989be77d39,39909,1731556548668, archiveDir=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/oldWALs, maxLogs=32 2024-11-14T03:55:50,006 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C39909%2C1731556548668.meta.1731556550005.meta 2024-11-14T03:55:50,012 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/WALs/6a989be77d39,39909,1731556548668/6a989be77d39%2C39909%2C1731556548668.meta.1731556550005.meta 2024-11-14T03:55:50,017 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43491:43491),(127.0.0.1/127.0.0.1:34753:34753)] 2024-11-14T03:55:50,019 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:55:50,019 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T03:55:50,019 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T03:55:50,020 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
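
The coprocessor lines above show org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor. A sketch of how a coprocessor class is attached to an ordinary table's descriptor via the client API; "demo" is a hypothetical table name, and system tables like hbase:meta receive this attribute from the master rather than from client code.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
      public static TableDescriptor build() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same endpoint class the log shows for hbase:meta.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }
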
2024-11-14T03:55:50,020 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T03:55:50,020 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:50,020 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T03:55:50,020 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T03:55:50,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:55:50,024 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:55:50,024 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:50,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:50,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:55:50,028 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:55:50,028 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:50,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:50,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:55:50,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:55:50,031 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:50,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:50,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:55:50,033 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:55:50,033 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:50,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
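
The repeated CompactionConfiguration lines (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, minCompactSize:128 MB, ...) reflect per-store compaction tuning. The sketch below shows configuration keys that commonly feed those values; the mapping of each logged number to a key is my reading of the defaults, so treat the key names and values as assumptions to verify against the HBase reference guide.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed mapping to "files [minFilesToCompact:3, maxFilesToCompact:10)".
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // "ratio 1.200000" in the log.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // "minCompactSize:128 MB" in the log, expressed in bytes.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        return conf;
      }
    }
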
2024-11-14T03:55:50,034 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:55:50,035 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740 2024-11-14T03:55:50,037 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740 2024-11-14T03:55:50,038 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:55:50,038 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:55:50,039 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:55:50,041 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:55:50,042 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716476, jitterRate=-0.08895370364189148}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:55:50,043 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T03:55:50,044 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731556550020Writing region info on filesystem at 1731556550020Initializing all the Stores at 1731556550022 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556550022Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556550022Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556550022Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556550022Cleaning up temporary data from old regions at 1731556550038 (+16 ms)Running coprocessor post-open hooks at 1731556550043 (+5 ms)Region opened successfully at 1731556550044 (+1 ms) 2024-11-14T03:55:50,045 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731556549993 2024-11-14T03:55:50,049 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T03:55:50,049 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T03:55:50,050 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,39909,1731556548668 2024-11-14T03:55:50,051 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,39909,1731556548668, state=OPEN 2024-11-14T03:55:50,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:55:50,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:55:50,057 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6a989be77d39,39909,1731556548668 2024-11-14T03:55:50,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:55:50,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:55:50,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T03:55:50,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,39909,1731556548668 in 217 msec 2024-11-14T03:55:50,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T03:55:50,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 633 msec 2024-11-14T03:55:50,067 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:55:50,067 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T03:55:50,069 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:55:50,069 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,39909,1731556548668, seqNum=-1] 2024-11-14T03:55:50,070 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:55:50,072 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51187, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:55:50,080 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 714 msec 2024-11-14T03:55:50,080 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731556550080, completionTime=-1 2024-11-14T03:55:50,080 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T03:55:50,080 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T03:55:50,082 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T03:55:50,082 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731556610082 2024-11-14T03:55:50,083 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731556670083 2024-11-14T03:55:50,083 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T03:55:50,083 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,45611,1731556548614-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:50,083 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,45611,1731556548614-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:50,083 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,45611,1731556548614-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:50,083 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6a989be77d39:45611, period=300000, unit=MILLISECONDS is enabled. 
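
"Start fetching meta region location from registry" above is the client-side step of locating hbase:meta before any namespace or table RPC. A minimal sketch using the ordinary public Connection API rather than the internal ConnectionUtils path in the log, assuming a reachable hbase-site.xml on the classpath.

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml (quorum, znode parent) is on the classpath.
        try (Connection conn = ConnectionFactory.createConnection();
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
          System.out.println("hbase:meta is on " + loc.getServerName());
        }
      }
    }
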
2024-11-14T03:55:50,083 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:50,085 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:50,086 DEBUG [master/6a989be77d39:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T03:55:50,091 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.381sec 2024-11-14T03:55:50,091 INFO [master/6a989be77d39:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T03:55:50,091 INFO [master/6a989be77d39:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T03:55:50,091 INFO [master/6a989be77d39:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T03:55:50,091 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T03:55:50,091 INFO [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T03:55:50,091 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,45611,1731556548614-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:55:50,091 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,45611,1731556548614-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T03:55:50,094 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a89c49a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:55:50,094 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6a989be77d39,45611,-1 for getting cluster id 2024-11-14T03:55:50,094 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T03:55:50,096 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T03:55:50,096 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T03:55:50,096 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,45611,1731556548614-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
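[Editor's note] The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." records above come from ChoreService scheduling periodic tasks. Below is a minimal sketch of how such a chore is typically defined and registered, assuming the public ScheduledChore/ChoreService/Stoppable API as I recall it; exact constructor overloads may differ between HBase versions, and the chore name, period, and pool prefix here are made up.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ExampleChore extends ScheduledChore {
  ExampleChore(Stoppable stopper) {
    // name, stopper, period, initial delay, unit -- mirrors the
    // "period=..., unit=MILLISECONDS" fields printed above
    super("ExampleChore", stopper, 60_000, 0, TimeUnit.MILLISECONDS);
  }

  @Override
  protected void chore() {
    // periodic work goes here
  }

  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    // scheduling is what emits the "... is enabled." lines seen in the log
    service.scheduleChore(new ExampleChore(stopper));
    service.shutdown();
  }
}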
2024-11-14T03:55:50,102 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c73484c4-4323-4ed9-9a62-871f541825ad' 2024-11-14T03:55:50,103 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T03:55:50,103 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c73484c4-4323-4ed9-9a62-871f541825ad" 2024-11-14T03:55:50,104 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f2a6128, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:55:50,104 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6a989be77d39,45611,-1] 2024-11-14T03:55:50,104 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T03:55:50,105 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:50,107 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44714, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T03:55:50,108 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6535c161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:55:50,109 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:55:50,111 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,39909,1731556548668, seqNum=-1] 2024-11-14T03:55:50,111 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:55:50,114 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52988, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:55:50,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6a989be77d39,45611,1731556548614 2024-11-14T03:55:50,119 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:50,124 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T03:55:50,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T03:55:50,124 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
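[Editor's note] The "Minicluster is up" / "Shutting down minicluster" records above, and the tearDown stack trace that follows, come from the HBaseTestingUtil harness. A minimal sketch of that lifecycle pattern is shown below; HBaseTestingUtil and shutdownMiniCluster() appear verbatim in this log, while startMiniCluster() is the usual counterpart as I understand the harness (the exact overload and cluster options used by this particular test are not visible here).

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // brings up ZooKeeper, DFS, a master, and a region server in-process;
    // produces the "Minicluster is up; activeMaster=..." line
    util.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // produces "Shutting down minicluster" ... "Minicluster is down"
    util.shutdownMiniCluster();
  }

  @Test
  public void smoke() throws Exception {
    // a real test would exercise the cluster via util.getConnection()
  }
}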
2024-11-14T03:55:50,125 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:55:50,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:50,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:50,125 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T03:55:50,125 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T03:55:50,125 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=857251464, stopped=false 2024-11-14T03:55:50,125 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6a989be77d39,45611,1731556548614 2024-11-14T03:55:50,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:55:50,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:50,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:55:50,128 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:55:50,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:50,129 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:55:50,129 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T03:55:50,129 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:55:50,130 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:55:50,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:50,133 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6a989be77d39,39909,1731556548668' ***** 2024-11-14T03:55:50,133 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T03:55:50,134 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T03:55:50,134 INFO [RS:0;6a989be77d39:39909 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T03:55:50,134 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T03:55:50,134 INFO [RS:0;6a989be77d39:39909 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T03:55:50,134 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(959): stopping server 6a989be77d39,39909,1731556548668 2024-11-14T03:55:50,134 INFO [RS:0;6a989be77d39:39909 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:55:50,135 INFO [RS:0;6a989be77d39:39909 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6a989be77d39:39909. 2024-11-14T03:55:50,135 DEBUG [RS:0;6a989be77d39:39909 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:55:50,135 DEBUG [RS:0;6a989be77d39:39909 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:50,135 INFO [RS:0;6a989be77d39:39909 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T03:55:50,135 INFO [RS:0;6a989be77d39:39909 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-14T03:55:50,135 INFO [RS:0;6a989be77d39:39909 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T03:55:50,135 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T03:55:50,147 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T03:55:50,148 DEBUG [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T03:55:50,148 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:55:50,148 DEBUG [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T03:55:50,148 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:55:50,148 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:55:50,148 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:55:50,148 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:55:50,148 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T03:55:50,177 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740/.tmp/ns/467110cd7ee44cbfb85e7d1a6c3dbf15 is 43, key is default/ns:d/1731556550072/Put/seqid=0 2024-11-14T03:55:50,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741835_1011 (size=5153) 2024-11-14T03:55:50,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741835_1011 (size=5153) 2024-11-14T03:55:50,217 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740/.tmp/ns/467110cd7ee44cbfb85e7d1a6c3dbf15 2024-11-14T03:55:50,228 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740/.tmp/ns/467110cd7ee44cbfb85e7d1a6c3dbf15 as hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740/ns/467110cd7ee44cbfb85e7d1a6c3dbf15 2024-11-14T03:55:50,238 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740/ns/467110cd7ee44cbfb85e7d1a6c3dbf15, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T03:55:50,240 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 92ms, sequenceid=6, compaction requested=false 2024-11-14T03:55:50,240 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T03:55:50,254 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T03:55:50,256 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:55:50,256 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:55:50,256 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556550148Running coprocessor pre-close hooks at 1731556550148Disabling compacts and flushes for region at 1731556550148Disabling writes for close at 1731556550148Obtaining lock to block concurrent updates at 1731556550148Preparing flush snapshotting stores in 1588230740 at 1731556550148Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731556550149 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731556550150 (+1 ms)Flushing 1588230740/ns: creating writer at 1731556550150Flushing 1588230740/ns: appending metadata at 1731556550177 (+27 ms)Flushing 1588230740/ns: closing flushed file at 1731556550177Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2be8e7e4: reopening flushed file at 1731556550226 (+49 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 92ms, sequenceid=6, compaction requested=false at 1731556550240 (+14 ms)Writing region close event to WAL at 1731556550247 (+7 ms)Running coprocessor post-close hooks at 1731556550255 (+8 ms)Closed at 1731556550256 (+1 ms) 2024-11-14T03:55:50,256 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T03:55:50,348 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(976): stopping server 6a989be77d39,39909,1731556548668; all regions closed. 
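[Editor's note] The flush records above show the usual two-phase publish: DefaultStoreFlusher writes the new HFile under the region's .tmp directory, then HRegionFileSystem "Commits" it by moving it into the column-family directory, so readers never observe a half-written file. The sketch below illustrates that write-then-rename pattern with the generic Hadoop FileSystem API; it is not HBase's internal code, and the local /tmp paths and file contents are made up.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/tmp/tmp-commit-demo/.tmp/example-hfile");
    Path dst = new Path("/tmp/tmp-commit-demo/ns/example-hfile");

    // 1) write the new file under .tmp so readers never see a partial file
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here");
    }

    // 2) publish it by renaming into the store directory (the "Committing
    //    ... as ..." step in the log)
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new IOException("commit failed for " + tmp);
    }
  }
}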
2024-11-14T03:55:50,349 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,349 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,349 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,349 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,350 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741834_1010 (size=1152) 2024-11-14T03:55:50,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741834_1010 (size=1152) 2024-11-14T03:55:50,357 DEBUG [RS:0;6a989be77d39:39909 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/oldWALs 2024-11-14T03:55:50,357 INFO [RS:0;6a989be77d39:39909 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C39909%2C1731556548668.meta:.meta(num 1731556550005) 2024-11-14T03:55:50,358 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,358 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,358 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,359 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,359 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741833_1009 (size=93) 2024-11-14T03:55:50,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741833_1009 (size=93) 2024-11-14T03:55:50,373 DEBUG [RS:0;6a989be77d39:39909 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/oldWALs 2024-11-14T03:55:50,373 INFO [RS:0;6a989be77d39:39909 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C39909%2C1731556548668:(num 1731556549633) 2024-11-14T03:55:50,374 DEBUG [RS:0;6a989be77d39:39909 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:50,374 INFO [RS:0;6a989be77d39:39909 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:55:50,374 INFO [RS:0;6a989be77d39:39909 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:55:50,374 INFO [RS:0;6a989be77d39:39909 {}] hbase.ChoreService(370): Chore service for: regionserver/6a989be77d39:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T03:55:50,375 INFO [RS:0;6a989be77d39:39909 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:55:50,375 INFO [RS:0;6a989be77d39:39909 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39909 2024-11-14T03:55:50,375 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
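[Editor's note] The "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL: FSHLog ..." records above are the tail end of WAL rolling, the behaviour TestLogRolling exercises. As a hedged illustration only (this is not the test's own code), a client can request a roll through the Admin API roughly as follows; rollWALWriter(ServerName) and getRegionServers() are the public calls as I understand them, and the connection setup is the standard bootstrap.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // ask every live region server to roll its WAL; rolled files are later
      // archived to .../oldWALs once no region still needs their edits
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}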
2024-11-14T03:55:50,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:55:50,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6a989be77d39,39909,1731556548668 2024-11-14T03:55:50,380 INFO [RS:0;6a989be77d39:39909 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:55:50,384 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6a989be77d39,39909,1731556548668] 2024-11-14T03:55:50,386 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6a989be77d39,39909,1731556548668 already deleted, retry=false 2024-11-14T03:55:50,386 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6a989be77d39,39909,1731556548668 expired; onlineServers=0 2024-11-14T03:55:50,386 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6a989be77d39,45611,1731556548614' ***** 2024-11-14T03:55:50,386 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T03:55:50,387 INFO [M:0;6a989be77d39:45611 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:55:50,387 INFO [M:0;6a989be77d39:45611 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:55:50,387 DEBUG [M:0;6a989be77d39:45611 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T03:55:50,387 DEBUG [M:0;6a989be77d39:45611 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T03:55:50,387 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-14T03:55:50,387 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556549372 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556549372,5,FailOnTimeoutGroup] 2024-11-14T03:55:50,387 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556549373 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556549373,5,FailOnTimeoutGroup] 2024-11-14T03:55:50,388 INFO [M:0;6a989be77d39:45611 {}] hbase.ChoreService(370): Chore service for: master/6a989be77d39:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T03:55:50,388 INFO [M:0;6a989be77d39:45611 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:55:50,388 DEBUG [M:0;6a989be77d39:45611 {}] master.HMaster(1795): Stopping service threads 2024-11-14T03:55:50,388 INFO [M:0;6a989be77d39:45611 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T03:55:50,388 INFO [M:0;6a989be77d39:45611 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:55:50,388 INFO [M:0;6a989be77d39:45611 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T03:55:50,389 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
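[Editor's note] The records above ("RegionServer ephemeral node deleted, processing expiration [...]") show the liveness mechanism: each region server holds an ephemeral znode under /hbase/rs, and when its ZooKeeper session ends the node disappears, firing watches that the master's RegionServerTracker reacts to. The sketch below demonstrates that general pattern with the plain ZooKeeper client (not HBase's ZKWatcher); the quorum address, znode path, and two-session setup are hypothetical.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralLivenessSketch {
  private static ZooKeeper connect(String quorum) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    return zk;
  }

  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:2181";                 // hypothetical quorum
    ZooKeeper regionServerSession = connect(quorum);  // plays the region server
    ZooKeeper masterSession = connect(quorum);        // plays the master/tracker

    String path = "/demo-rs-server-1";                // made-up znode path
    regionServerSession.create(path, new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    CountDownLatch gone = new CountDownLatch(1);
    masterSession.exists(path, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        gone.countDown();                             // "node deleted" noticed
      }
    });

    regionServerSession.close(); // session ends; ZooKeeper removes the ephemeral node
    gone.await();                // analogous to RegionServerTracker processing expiration
    masterSession.close();
  }
}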
2024-11-14T03:55:50,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T03:55:50,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:50,392 DEBUG [M:0;6a989be77d39:45611 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-14T03:55:50,393 DEBUG [M:0;6a989be77d39:45611 {}] master.ActiveMasterManager(353): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-14T03:55:50,393 INFO [M:0;6a989be77d39:45611 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/.lastflushedseqids 2024-11-14T03:55:50,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741836_1012 (size=108) 2024-11-14T03:55:50,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741836_1012 (size=108) 2024-11-14T03:55:50,418 INFO [M:0;6a989be77d39:45611 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T03:55:50,419 INFO [M:0;6a989be77d39:45611 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T03:55:50,419 DEBUG [M:0;6a989be77d39:45611 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:55:50,419 INFO [M:0;6a989be77d39:45611 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:50,419 DEBUG [M:0;6a989be77d39:45611 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:50,419 DEBUG [M:0;6a989be77d39:45611 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:55:50,419 DEBUG [M:0;6a989be77d39:45611 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T03:55:50,419 INFO [M:0;6a989be77d39:45611 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T03:55:50,441 DEBUG [M:0;6a989be77d39:45611 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9fb69c2cd1cc4b8d9e073894e397846a is 82, key is hbase:meta,,1/info:regioninfo/1731556550050/Put/seqid=0 2024-11-14T03:55:50,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741837_1013 (size=5672) 2024-11-14T03:55:50,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741837_1013 (size=5672) 2024-11-14T03:55:50,450 INFO [M:0;6a989be77d39:45611 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9fb69c2cd1cc4b8d9e073894e397846a 2024-11-14T03:55:50,475 DEBUG [M:0;6a989be77d39:45611 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8c12c12f7f964ccca4d728880d2f2474 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731556550078/Put/seqid=0 2024-11-14T03:55:50,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741838_1014 (size=5275) 2024-11-14T03:55:50,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741838_1014 (size=5275) 2024-11-14T03:55:50,482 INFO [M:0;6a989be77d39:45611 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8c12c12f7f964ccca4d728880d2f2474 2024-11-14T03:55:50,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:55:50,486 INFO [RS:0;6a989be77d39:39909 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:55:50,486 INFO [RS:0;6a989be77d39:39909 {}] regionserver.HRegionServer(1031): Exiting; stopping=6a989be77d39,39909,1731556548668; zookeeper connection closed. 
2024-11-14T03:55:50,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39909-0x100ef6314910001, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:55:50,486 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@78a83004 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@78a83004 2024-11-14T03:55:50,487 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T03:55:50,505 DEBUG [M:0;6a989be77d39:45611 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d7d2d843b9a7489ab0ca278b5e46fa90 is 69, key is 6a989be77d39,39909,1731556548668/rs:state/1731556549422/Put/seqid=0 2024-11-14T03:55:50,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741839_1015 (size=5156) 2024-11-14T03:55:50,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741839_1015 (size=5156) 2024-11-14T03:55:50,512 INFO [M:0;6a989be77d39:45611 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d7d2d843b9a7489ab0ca278b5e46fa90 2024-11-14T03:55:50,541 DEBUG [M:0;6a989be77d39:45611 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dea9be782e2d4417a051cc72b8821c54 is 52, key is load_balancer_on/state:d/1731556550122/Put/seqid=0 2024-11-14T03:55:50,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741840_1016 (size=5056) 2024-11-14T03:55:50,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741840_1016 (size=5056) 2024-11-14T03:55:50,555 INFO [M:0;6a989be77d39:45611 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dea9be782e2d4417a051cc72b8821c54 2024-11-14T03:55:50,564 DEBUG [M:0;6a989be77d39:45611 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9fb69c2cd1cc4b8d9e073894e397846a as hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9fb69c2cd1cc4b8d9e073894e397846a 2024-11-14T03:55:50,574 INFO [M:0;6a989be77d39:45611 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9fb69c2cd1cc4b8d9e073894e397846a, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-14T03:55:50,576 DEBUG [M:0;6a989be77d39:45611 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8c12c12f7f964ccca4d728880d2f2474 as hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8c12c12f7f964ccca4d728880d2f2474 2024-11-14T03:55:50,586 INFO [M:0;6a989be77d39:45611 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8c12c12f7f964ccca4d728880d2f2474, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T03:55:50,594 DEBUG [M:0;6a989be77d39:45611 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d7d2d843b9a7489ab0ca278b5e46fa90 as hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d7d2d843b9a7489ab0ca278b5e46fa90 2024-11-14T03:55:50,603 INFO [M:0;6a989be77d39:45611 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d7d2d843b9a7489ab0ca278b5e46fa90, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T03:55:50,605 DEBUG [M:0;6a989be77d39:45611 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dea9be782e2d4417a051cc72b8821c54 as hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dea9be782e2d4417a051cc72b8821c54 2024-11-14T03:55:50,611 INFO [M:0;6a989be77d39:45611 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35605/user/jenkins/test-data/7d419bf0-157e-3671-670d-ef55e862a7dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dea9be782e2d4417a051cc72b8821c54, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T03:55:50,613 INFO [M:0;6a989be77d39:45611 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 194ms, sequenceid=29, compaction requested=false 2024-11-14T03:55:50,615 INFO [M:0;6a989be77d39:45611 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T03:55:50,615 DEBUG [M:0;6a989be77d39:45611 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556550419Disabling compacts and flushes for region at 1731556550419Disabling writes for close at 1731556550419Obtaining lock to block concurrent updates at 1731556550419Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731556550419Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731556550420 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731556550421 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731556550421Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731556550440 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731556550440Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731556550457 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731556550474 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731556550474Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731556550489 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731556550505 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731556550505Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731556550519 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731556550540 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731556550540Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65078bc8: reopening flushed file at 1731556550563 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a48bc87: reopening flushed file at 1731556550574 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50a94727: reopening flushed file at 1731556550586 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@622a350b: reopening flushed file at 1731556550604 (+18 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 194ms, sequenceid=29, compaction requested=false at 1731556550613 (+9 ms)Writing region close event to WAL at 1731556550614 (+1 ms)Closed at 1731556550615 (+1 ms) 2024-11-14T03:55:50,615 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,615 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,616 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,616 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,616 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:55:50,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46337 is added to blk_1073741830_1006 (size=10311) 2024-11-14T03:55:50,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35803 is added to blk_1073741830_1006 (size=10311) 2024-11-14T03:55:50,620 INFO [M:0;6a989be77d39:45611 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T03:55:50,620 INFO [M:0;6a989be77d39:45611 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45611 2024-11-14T03:55:50,620 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T03:55:50,620 INFO [M:0;6a989be77d39:45611 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:55:50,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:55:50,723 INFO [M:0;6a989be77d39:45611 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:55:50,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45611-0x100ef6314910000, quorum=127.0.0.1:57134, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:55:50,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@156b26b5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:55:50,727 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3dd99d02{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:55:50,727 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:55:50,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@254e0164{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:55:50,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30ebe7e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.log.dir/,STOPPED} 2024-11-14T03:55:50,729 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:55:50,730 WARN [BP-283798774-172.17.0.2-1731556547840 heartbeating to localhost/127.0.0.1:35605 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:55:50,730 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:55:50,730 WARN [BP-283798774-172.17.0.2-1731556547840 heartbeating to localhost/127.0.0.1:35605 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-283798774-172.17.0.2-1731556547840 (Datanode Uuid 02ac9d1b-e712-4487-ac30-40cec3d33cce) service to localhost/127.0.0.1:35605 2024-11-14T03:55:50,731 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/data/data3/current/BP-283798774-172.17.0.2-1731556547840 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:55:50,731 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/data/data4/current/BP-283798774-172.17.0.2-1731556547840 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:55:50,731 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:55:50,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7bd218c7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:55:50,743 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a9b9566{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:55:50,743 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:55:50,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d281952{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:55:50,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b5aedfa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.log.dir/,STOPPED} 2024-11-14T03:55:50,746 WARN [BP-283798774-172.17.0.2-1731556547840 heartbeating to localhost/127.0.0.1:35605 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:55:50,746 WARN [BP-283798774-172.17.0.2-1731556547840 heartbeating to localhost/127.0.0.1:35605 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-283798774-172.17.0.2-1731556547840 (Datanode Uuid 6dfe2db9-c1d3-4f69-a179-bed7c58ca055) service to localhost/127.0.0.1:35605 2024-11-14T03:55:50,747 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/data/data1/current/BP-283798774-172.17.0.2-1731556547840 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:55:50,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/cluster_9d2e8ea4-2263-f9c7-8415-37eeac943382/data/data2/current/BP-283798774-172.17.0.2-1731556547840 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:55:50,748 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T03:55:50,748 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:55:50,748 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:55:50,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@245f97f3{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:55:50,758 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@219d080{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:55:50,758 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:55:50,758 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10885b70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:55:50,758 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c443180{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.log.dir/,STOPPED} 2024-11-14T03:55:50,770 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T03:55:50,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T03:55:50,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T03:55:50,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.log.dir so I do NOT create it in target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67 2024-11-14T03:55:50,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/27eda811-622c-b0d9-0449-e641d141fd93/hadoop.tmp.dir so I do NOT create it in target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67 2024-11-14T03:55:50,797 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a, deleteOnExit=true 2024-11-14T03:55:50,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T03:55:50,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/test.cache.data in system properties and HBase conf 2024-11-14T03:55:50,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T03:55:50,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir in system properties and HBase conf 2024-11-14T03:55:50,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T03:55:50,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T03:55:50,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T03:55:50,798 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T03:55:50,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:55:50,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:55:50,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T03:55:50,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:55:50,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T03:55:50,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T03:55:50,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:55:50,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:55:50,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T03:55:50,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/nfs.dump.dir in system properties and HBase conf 2024-11-14T03:55:50,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/java.io.tmpdir in system properties and HBase conf 2024-11-14T03:55:50,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:55:50,801 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T03:55:50,801 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T03:55:50,819 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:55:50,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:55:50,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T03:55:50,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T03:55:50,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T03:55:50,896 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:55:50,902 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:55:50,906 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:55:50,906 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:55:50,906 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:55:50,907 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:55:50,907 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73b9709e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:55:50,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2fd186ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:55:51,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d607c51{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/java.io.tmpdir/jetty-localhost-35559-hadoop-hdfs-3_4_1-tests_jar-_-any-17529438448627109809/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:55:51,047 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ae0a20f{HTTP/1.1, (http/1.1)}{localhost:35559} 2024-11-14T03:55:51,047 INFO [Time-limited test {}] server.Server(415): Started @107059ms 2024-11-14T03:55:51,069 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:55:51,149 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:55:51,154 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:55:51,158 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:55:51,158 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:55:51,158 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:55:51,159 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@307e6985{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:55:51,159 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53463a69{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:55:51,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bd89867{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/java.io.tmpdir/jetty-localhost-42685-hadoop-hdfs-3_4_1-tests_jar-_-any-11550054716724547030/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:55:51,292 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1aa07d80{HTTP/1.1, (http/1.1)}{localhost:42685} 2024-11-14T03:55:51,292 INFO [Time-limited test {}] server.Server(415): Started @107305ms 2024-11-14T03:55:51,294 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:55:51,336 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:55:51,340 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:55:51,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:55:51,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:55:51,341 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:55:51,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28707c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:55:51,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75de815b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:55:51,387 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data2/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:55:51,387 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data1/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:55:51,407 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:55:51,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x149f66cb14273947 with lease ID 0xe1192eedb63a5643: Processing first storage report for DS-54151df9-487a-44ab-b5ce-19ad1242def2 from datanode DatanodeRegistration(127.0.0.1:33561, datanodeUuid=e3c0dcc4-5c7f-4d8c-b3bb-e265a0f253d0, infoPort=38925, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:55:51,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x149f66cb14273947 with lease ID 0xe1192eedb63a5643: from storage DS-54151df9-487a-44ab-b5ce-19ad1242def2 node DatanodeRegistration(127.0.0.1:33561, datanodeUuid=e3c0dcc4-5c7f-4d8c-b3bb-e265a0f253d0, infoPort=38925, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:55:51,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x149f66cb14273947 with lease ID 0xe1192eedb63a5643: Processing first storage report for DS-710e00ef-bc3b-485c-a28a-a8b2b110a376 from datanode DatanodeRegistration(127.0.0.1:33561, datanodeUuid=e3c0dcc4-5c7f-4d8c-b3bb-e265a0f253d0, infoPort=38925, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:55:51,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x149f66cb14273947 with lease ID 0xe1192eedb63a5643: from storage DS-710e00ef-bc3b-485c-a28a-a8b2b110a376 node DatanodeRegistration(127.0.0.1:33561, datanodeUuid=e3c0dcc4-5c7f-4d8c-b3bb-e265a0f253d0, infoPort=38925, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:55:51,464 INFO [regionserver/6a989be77d39:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:55:51,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57f0db4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/java.io.tmpdir/jetty-localhost-37183-hadoop-hdfs-3_4_1-tests_jar-_-any-8859780017668847773/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:55:51,475 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78ab676e{HTTP/1.1, (http/1.1)}{localhost:37183} 2024-11-14T03:55:51,475 INFO [Time-limited test {}] server.Server(415): Started @107487ms 2024-11-14T03:55:51,477 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T03:55:51,613 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data4/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:55:51,613 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data3/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:55:51,649 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T03:55:51,657 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa339d829cd3bd763 with lease ID 0xe1192eedb63a5644: Processing first storage report for DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa from datanode DatanodeRegistration(127.0.0.1:45391, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=45965, infoSecurePort=0, ipcPort=34425, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:55:51,657 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa339d829cd3bd763 with lease ID 0xe1192eedb63a5644: from storage DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa node DatanodeRegistration(127.0.0.1:45391, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=45965, infoSecurePort=0, ipcPort=34425, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:55:51,657 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa339d829cd3bd763 with lease ID 0xe1192eedb63a5644: Processing first storage report for DS-66514651-f8c3-401d-a0f7-b9df137518b2 from datanode DatanodeRegistration(127.0.0.1:45391, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=45965, infoSecurePort=0, ipcPort=34425, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:55:51,657 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa339d829cd3bd763 with lease ID 0xe1192eedb63a5644: from storage DS-66514651-f8c3-401d-a0f7-b9df137518b2 node DatanodeRegistration(127.0.0.1:45391, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=45965, infoSecurePort=0, ipcPort=34425, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:55:51,758 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67 2024-11-14T03:55:51,761 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/zookeeper_0, clientPort=64949, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T03:55:51,762 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64949 2024-11-14T03:55:51,763 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:51,764 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:51,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45391 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:55:51,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33561 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:55:51,776 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65 with version=8 2024-11-14T03:55:51,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/hbase-staging 2024-11-14T03:55:51,778 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:55:51,778 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:51,779 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:51,779 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:55:51,779 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:51,779 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:55:51,779 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T03:55:51,779 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:55:51,780 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44171 2024-11-14T03:55:51,781 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44171 connecting to ZooKeeper ensemble=127.0.0.1:64949 2024-11-14T03:55:51,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:441710x0, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:55:51,787 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44171-0x100ef6320fb0000 connected 2024-11-14T03:55:51,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:51,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:51,833 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:55:51,833 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65, hbase.cluster.distributed=false 2024-11-14T03:55:51,835 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:55:51,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44171 2024-11-14T03:55:51,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44171 2024-11-14T03:55:51,856 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44171 2024-11-14T03:55:51,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44171 2024-11-14T03:55:51,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44171 2024-11-14T03:55:51,881 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:55:51,881 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:51,881 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:51,881 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:55:51,881 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:51,881 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:55:51,881 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T03:55:51,881 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:55:51,882 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46431 2024-11-14T03:55:51,884 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46431 connecting to ZooKeeper ensemble=127.0.0.1:64949 2024-11-14T03:55:51,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:51,887 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:51,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:464310x0, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:55:51,892 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46431-0x100ef6320fb0001 connected 2024-11-14T03:55:51,892 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:55:51,893 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T03:55:51,893 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T03:55:51,894 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T03:55:51,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:55:51,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46431 2024-11-14T03:55:51,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46431 2024-11-14T03:55:51,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46431 2024-11-14T03:55:51,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46431 2024-11-14T03:55:51,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46431 
2024-11-14T03:55:51,911 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6a989be77d39:44171 2024-11-14T03:55:51,911 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6a989be77d39,44171,1731556551778 2024-11-14T03:55:51,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:55:51,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:55:51,914 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6a989be77d39,44171,1731556551778 2024-11-14T03:55:51,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:51,916 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T03:55:51,917 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6a989be77d39,44171,1731556551778 from backup master directory 2024-11-14T03:55:51,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T03:55:51,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:51,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6a989be77d39,44171,1731556551778 2024-11-14T03:55:51,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:55:51,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:55:51,918 WARN [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T03:55:51,918 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6a989be77d39,44171,1731556551778 2024-11-14T03:55:51,918 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(347): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Unable to get data of znode /hbase/backup-masters/6a989be77d39,44171,1731556551778 because node does not exist (not an error) 2024-11-14T03:55:51,927 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/hbase.id] with ID: 0312010e-cb3d-45c2-a06a-57701560ed6c 2024-11-14T03:55:51,927 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/.tmp/hbase.id 2024-11-14T03:55:51,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45391 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:55:51,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33561 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:55:51,939 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/.tmp/hbase.id]:[hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/hbase.id] 2024-11-14T03:55:51,959 INFO [master/6a989be77d39:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:51,959 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T03:55:51,961 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-14T03:55:51,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:51,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:52,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33561 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:55:52,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45391 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:55:52,024 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:55:52,025 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T03:55:52,025 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:55:52,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33561 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:55:52,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45391 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:55:52,050 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store 2024-11-14T03:55:52,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45391 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:55:52,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33561 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:55:52,060 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:52,060 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:55:52,060 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:52,060 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:52,060 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:55:52,060 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:55:52,060 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T03:55:52,061 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556552060Disabling compacts and flushes for region at 1731556552060Disabling writes for close at 1731556552060Writing region close event to WAL at 1731556552060Closed at 1731556552060 2024-11-14T03:55:52,061 WARN [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/.initializing 2024-11-14T03:55:52,062 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778 2024-11-14T03:55:52,064 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C44171%2C1731556551778, suffix=, logDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778, archiveDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/oldWALs, maxLogs=10 2024-11-14T03:55:52,065 INFO [master/6a989be77d39:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C44171%2C1731556551778.1731556552065 2024-11-14T03:55:52,072 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556552065 2024-11-14T03:55:52,073 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45965:45965),(127.0.0.1/127.0.0.1:38925:38925)] 2024-11-14T03:55:52,078 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:55:52,078 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:52,078 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,079 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,081 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T03:55:52,082 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:52,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T03:55:52,085 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:55:52,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,086 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T03:55:52,087 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:55:52,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T03:55:52,089 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:55:52,089 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,090 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,091 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,092 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,092 DEBUG [master/6a989be77d39:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,093 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T03:55:52,094 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:55:52,097 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:55:52,097 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688832, jitterRate=-0.12410534918308258}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T03:55:52,098 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731556552079Initializing all the Stores at 1731556552080 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556552080Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556552081 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556552081Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556552081Cleaning up temporary data from old regions at 1731556552092 (+11 ms)Region opened successfully at 1731556552098 (+6 ms) 2024-11-14T03:55:52,099 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T03:55:52,103 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@363c54d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:55:52,104 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T03:55:52,104 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T03:55:52,104 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T03:55:52,104 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T03:55:52,105 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T03:55:52,105 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T03:55:52,105 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T03:55:52,108 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T03:55:52,109 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T03:55:52,110 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T03:55:52,110 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T03:55:52,111 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T03:55:52,112 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T03:55:52,113 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T03:55:52,114 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T03:55:52,115 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T03:55:52,116 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T03:55:52,117 DEBUG 
[master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T03:55:52,119 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T03:55:52,121 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T03:55:52,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:55:52,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:55:52,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:52,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:52,124 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6a989be77d39,44171,1731556551778, sessionid=0x100ef6320fb0000, setting cluster-up flag (Was=false) 2024-11-14T03:55:52,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:52,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:52,132 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T03:55:52,133 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,44171,1731556551778 2024-11-14T03:55:52,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:52,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:52,143 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T03:55:52,144 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,44171,1731556551778 2024-11-14T03:55:52,145 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T03:55:52,147 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T03:55:52,147 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T03:55:52,148 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T03:55:52,148 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6a989be77d39,44171,1731556551778 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T03:55:52,150 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:55:52,150 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:55:52,150 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:55:52,150 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:55:52,150 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6a989be77d39:0, corePoolSize=10, maxPoolSize=10 2024-11-14T03:55:52,150 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,150 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:55:52,150 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T03:55:52,153 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731556582153 2024-11-14T03:55:52,153 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T03:55:52,153 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T03:55:52,153 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T03:55:52,153 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T03:55:52,153 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T03:55:52,153 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T03:55:52,153 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:55:52,154 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T03:55:52,154 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-14T03:55:52,154 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T03:55:52,154 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T03:55:52,154 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T03:55:52,155 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T03:55:52,155 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T03:55:52,155 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,155 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556552155,5,FailOnTimeoutGroup] 2024-11-14T03:55:52,155 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556552155,5,FailOnTimeoutGroup] 2024-11-14T03:55:52,155 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T03:55:52,155 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,155 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T03:55:52,156 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,156 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33561 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:55:52,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45391 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:55:52,164 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T03:55:52,164 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65 2024-11-14T03:55:52,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33561 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:55:52,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45391 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:55:52,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:52,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:55:52,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:55:52,179 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:52,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:55:52,181 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:55:52,181 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:52,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:55:52,184 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:55:52,184 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:52,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:55:52,186 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:55:52,186 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:52,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:55:52,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740 2024-11-14T03:55:52,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740 2024-11-14T03:55:52,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:55:52,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:55:52,190 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:55:52,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:55:52,200 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:55:52,200 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811797, jitterRate=0.03225331008434296}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:55:52,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731556552173Initializing all the Stores at 1731556552174 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556552174Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556552177 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556552177Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556552177Cleaning up temporary data from old regions at 1731556552190 (+13 ms)Region opened successfully at 1731556552202 (+12 ms) 2024-11-14T03:55:52,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:55:52,202 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:55:52,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:55:52,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:55:52,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:55:52,203 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:55:52,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556552202Disabling compacts and flushes for region at 
1731556552202Disabling writes for close at 1731556552202Writing region close event to WAL at 1731556552203 (+1 ms)Closed at 1731556552203 2024-11-14T03:55:52,203 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(746): ClusterId : 0312010e-cb3d-45c2-a06a-57701560ed6c 2024-11-14T03:55:52,203 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T03:55:52,205 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:55:52,205 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T03:55:52,205 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T03:55:52,206 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T03:55:52,206 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T03:55:52,207 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:55:52,209 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T03:55:52,210 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T03:55:52,210 DEBUG [RS:0;6a989be77d39:46431 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@547d4f62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:55:52,225 DEBUG [RS:0;6a989be77d39:46431 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6a989be77d39:46431 2024-11-14T03:55:52,225 INFO [RS:0;6a989be77d39:46431 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T03:55:52,225 INFO [RS:0;6a989be77d39:46431 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T03:55:52,225 DEBUG [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T03:55:52,226 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,44171,1731556551778 with port=46431, startcode=1731556551881 2024-11-14T03:55:52,226 DEBUG [RS:0;6a989be77d39:46431 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T03:55:52,229 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40497, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T03:55:52,230 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44171 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6a989be77d39,46431,1731556551881 2024-11-14T03:55:52,230 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44171 {}] master.ServerManager(517): Registering regionserver=6a989be77d39,46431,1731556551881 2024-11-14T03:55:52,232 DEBUG [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65 2024-11-14T03:55:52,232 DEBUG [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43619 2024-11-14T03:55:52,232 DEBUG [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T03:55:52,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:55:52,235 DEBUG [RS:0;6a989be77d39:46431 {}] zookeeper.ZKUtil(111): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6a989be77d39,46431,1731556551881 2024-11-14T03:55:52,235 WARN [RS:0;6a989be77d39:46431 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T03:55:52,236 INFO [RS:0;6a989be77d39:46431 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:55:52,236 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6a989be77d39,46431,1731556551881] 2024-11-14T03:55:52,236 DEBUG [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881 2024-11-14T03:55:52,240 INFO [RS:0;6a989be77d39:46431 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T03:55:52,243 INFO [RS:0;6a989be77d39:46431 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T03:55:52,244 INFO [RS:0;6a989be77d39:46431 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T03:55:52,244 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T03:55:52,244 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T03:55:52,245 INFO [RS:0;6a989be77d39:46431 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T03:55:52,246 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,246 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,246 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,246 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,246 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,246 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,246 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:55:52,246 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,246 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,246 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,247 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,247 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,247 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:52,247 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:55:52,247 DEBUG [RS:0;6a989be77d39:46431 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:55:52,249 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T03:55:52,250 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,250 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,250 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,250 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,250 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,46431,1731556551881-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:55:52,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:52,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:52,274 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T03:55:52,274 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,46431,1731556551881-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,274 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,274 INFO [RS:0;6a989be77d39:46431 {}] regionserver.Replication(171): 6a989be77d39,46431,1731556551881 started 2024-11-14T03:55:52,292 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T03:55:52,292 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(1482): Serving as 6a989be77d39,46431,1731556551881, RpcServer on 6a989be77d39/172.17.0.2:46431, sessionid=0x100ef6320fb0001 2024-11-14T03:55:52,293 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T03:55:52,293 DEBUG [RS:0;6a989be77d39:46431 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6a989be77d39,46431,1731556551881 2024-11-14T03:55:52,293 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,46431,1731556551881' 2024-11-14T03:55:52,293 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T03:55:52,293 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T03:55:52,294 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T03:55:52,294 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T03:55:52,294 DEBUG [RS:0;6a989be77d39:46431 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6a989be77d39,46431,1731556551881 2024-11-14T03:55:52,294 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,46431,1731556551881' 2024-11-14T03:55:52,294 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T03:55:52,295 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T03:55:52,295 DEBUG [RS:0;6a989be77d39:46431 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T03:55:52,295 INFO [RS:0;6a989be77d39:46431 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T03:55:52,295 INFO [RS:0;6a989be77d39:46431 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T03:55:52,360 WARN [6a989be77d39:44171 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-14T03:55:52,398 INFO [RS:0;6a989be77d39:46431 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C46431%2C1731556551881, suffix=, logDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881, archiveDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs, maxLogs=32 2024-11-14T03:55:52,400 INFO [RS:0;6a989be77d39:46431 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C46431%2C1731556551881.1731556552400 2024-11-14T03:55:52,424 INFO [RS:0;6a989be77d39:46431 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 2024-11-14T03:55:52,449 DEBUG [RS:0;6a989be77d39:46431 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45965:45965),(127.0.0.1/127.0.0.1:38925:38925)] 2024-11-14T03:55:52,610 DEBUG [6a989be77d39:44171 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T03:55:52,611 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6a989be77d39,46431,1731556551881 2024-11-14T03:55:52,613 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,46431,1731556551881, state=OPENING 2024-11-14T03:55:52,615 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T03:55:52,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:52,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:55:52,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:55:52,618 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:55:52,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,46431,1731556551881}] 2024-11-14T03:55:52,618 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:55:52,772 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T03:55:52,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34707, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T03:55:52,813 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:55:52,813 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T03:55:52,814 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:55:52,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:52,816 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C46431%2C1731556551881.meta, suffix=.meta, logDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881, archiveDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs, maxLogs=32 2024-11-14T03:55:52,817 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta 2024-11-14T03:55:52,830 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta 2024-11-14T03:55:52,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:52,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:52,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:52,845 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38925:38925),(127.0.0.1/127.0.0.1:45965:45965)] 2024-11-14T03:55:52,847 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:55:52,848 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T03:55:52,848 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T03:55:52,848 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T03:55:52,848 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T03:55:52,848 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:52,848 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T03:55:52,848 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T03:55:52,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:55:52,851 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:55:52,852 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:52,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:55:52,854 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:55:52,854 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:52,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:55:52,855 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:55:52,856 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:55:52,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:55:52,857 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:55:52,857 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:52,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T03:55:52,859 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:55:52,860 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740 2024-11-14T03:55:52,862 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740 2024-11-14T03:55:52,863 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:55:52,863 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:55:52,864 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:55:52,866 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:55:52,867 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833890, jitterRate=0.060346633195877075}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:55:52,868 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T03:55:52,868 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731556552848Writing region info on filesystem at 1731556552848Initializing all the Stores at 1731556552850 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556552850Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556552850Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556552850Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556552850Cleaning up temporary data from old regions at 1731556552863 (+13 ms)Running coprocessor post-open hooks at 1731556552868 (+5 ms)Region opened successfully at 1731556552868 2024-11-14T03:55:52,870 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731556552772 2024-11-14T03:55:52,874 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T03:55:52,874 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T03:55:52,875 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,46431,1731556551881 2024-11-14T03:55:52,878 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,46431,1731556551881, state=OPEN 2024-11-14T03:55:52,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:55:52,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:55:52,883 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6a989be77d39,46431,1731556551881 2024-11-14T03:55:52,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:55:52,883 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:55:52,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T03:55:52,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,46431,1731556551881 in 265 msec 2024-11-14T03:55:52,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T03:55:52,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 682 msec 2024-11-14T03:55:52,890 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:55:52,890 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T03:55:52,892 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:55:52,892 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,46431,1731556551881, seqNum=-1] 2024-11-14T03:55:52,893 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:55:52,894 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57297, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:55:52,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 754 msec 2024-11-14T03:55:52,902 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731556552902, completionTime=-1 2024-11-14T03:55:52,903 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T03:55:52,903 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T03:55:52,907 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T03:55:52,907 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731556612907 2024-11-14T03:55:52,907 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731556672907 2024-11-14T03:55:52,907 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 4 msec 2024-11-14T03:55:52,908 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44171,1731556551778-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,908 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44171,1731556551778-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,908 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44171,1731556551778-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,908 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6a989be77d39:44171, period=300000, unit=MILLISECONDS is enabled. 
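[editor's note] InitMetaProcedure above creates the two built-in namespaces, 'default' and 'hbase'. A minimal sketch, assuming a client Connection to this cluster, of confirming that through the Admin API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative only: list the namespaces InitMetaProcedure just created.
public class ListNamespacesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName()); // expect "default" and "hbase"
      }
    }
  }
}
```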
2024-11-14T03:55:52,908 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,909 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:52,911 DEBUG [master/6a989be77d39:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T03:55:52,915 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.997sec 2024-11-14T03:55:52,915 INFO [master/6a989be77d39:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T03:55:52,915 INFO [master/6a989be77d39:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T03:55:52,915 INFO [master/6a989be77d39:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T03:55:52,915 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T03:55:52,915 INFO [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T03:55:52,915 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44171,1731556551778-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:55:52,915 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44171,1731556551778-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T03:55:52,918 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T03:55:52,918 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T03:55:52,918 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,44171,1731556551778-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T03:55:53,013 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a63af9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:55:53,014 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6a989be77d39,44171,-1 for getting cluster id 2024-11-14T03:55:53,014 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T03:55:53,018 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0312010e-cb3d-45c2-a06a-57701560ed6c' 2024-11-14T03:55:53,018 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T03:55:53,018 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0312010e-cb3d-45c2-a06a-57701560ed6c" 2024-11-14T03:55:53,019 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@653d2fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:55:53,019 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6a989be77d39,44171,-1] 2024-11-14T03:55:53,019 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T03:55:53,020 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:55:53,021 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T03:55:53,022 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17d68fa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:55:53,023 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:55:53,024 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,46431,1731556551881, seqNum=-1] 2024-11-14T03:55:53,025 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:55:53,027 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58458, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:55:53,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6a989be77d39,44171,1731556551778 2024-11-14T03:55:53,029 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:53,032 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T03:55:53,048 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:55:53,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:53,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:53,048 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:55:53,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:55:53,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:55:53,048 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T03:55:53,048 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:55:53,049 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36173 2024-11-14T03:55:53,050 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36173 connecting to ZooKeeper ensemble=127.0.0.1:64949 2024-11-14T03:55:53,051 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:53,053 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:55:53,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:361730x0, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:55:53,059 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:361730x0, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-14T03:55:53,059 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-14T03:55:53,060 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T03:55:53,061 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36173-0x100ef6320fb0002 connected 2024-11-14T03:55:53,066 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T03:55:53,066 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:36173-0x100ef6320fb0002, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T03:55:53,068 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36173-0x100ef6320fb0002, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:55:53,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36173 2024-11-14T03:55:53,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36173 2024-11-14T03:55:53,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36173 2024-11-14T03:55:53,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36173 2024-11-14T03:55:53,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36173 2024-11-14T03:55:53,071 INFO [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(746): ClusterId : 0312010e-cb3d-45c2-a06a-57701560ed6c 2024-11-14T03:55:53,071 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T03:55:53,074 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T03:55:53,074 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T03:55:53,076 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T03:55:53,077 DEBUG [RS:1;6a989be77d39:36173 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16d1fe96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:55:53,095 DEBUG [RS:1;6a989be77d39:36173 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;6a989be77d39:36173 2024-11-14T03:55:53,095 INFO [RS:1;6a989be77d39:36173 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T03:55:53,095 INFO [RS:1;6a989be77d39:36173 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T03:55:53,095 DEBUG [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(832): About to register with Master. 
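[editor's note] The second region server above registers with the same test-local ZooKeeper ensemble (127.0.0.1:64949, base znode /hbase) that clients use as the connection registry. A minimal sketch of pointing a client at that ensemble; the quorum, port and znode are copied from the log and only make sense for this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative only: connect a client to the ZooKeeper ensemble the minicluster
// registered with (values taken from the log above).
public class ClientConnectionSketch {
  public static Connection connect() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 64949);
    conf.set("zookeeper.znode.parent", "/hbase");
    return ConnectionFactory.createConnection(conf);
  }
}
```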
2024-11-14T03:55:53,096 INFO [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,44171,1731556551778 with port=36173, startcode=1731556553047 2024-11-14T03:55:53,096 DEBUG [RS:1;6a989be77d39:36173 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T03:55:53,098 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53177, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T03:55:53,099 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44171 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6a989be77d39,36173,1731556553047 2024-11-14T03:55:53,099 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44171 {}] master.ServerManager(517): Registering regionserver=6a989be77d39,36173,1731556553047 2024-11-14T03:55:53,101 DEBUG [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65 2024-11-14T03:55:53,101 DEBUG [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43619 2024-11-14T03:55:53,101 DEBUG [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T03:55:53,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:55:53,104 DEBUG [RS:1;6a989be77d39:36173 {}] zookeeper.ZKUtil(111): regionserver:36173-0x100ef6320fb0002, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6a989be77d39,36173,1731556553047 2024-11-14T03:55:53,104 WARN [RS:1;6a989be77d39:36173 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T03:55:53,104 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6a989be77d39,36173,1731556553047] 2024-11-14T03:55:53,104 INFO [RS:1;6a989be77d39:36173 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:55:53,104 DEBUG [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047 2024-11-14T03:55:53,109 INFO [RS:1;6a989be77d39:36173 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T03:55:53,111 INFO [RS:1;6a989be77d39:36173 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T03:55:53,113 INFO [RS:1;6a989be77d39:36173 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T03:55:53,113 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
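[editor's note] The WAL provider (FSHLogProvider), global memstore limits (880 M with an 836 M low-water mark) and compaction throughput bounds (100/50 MB/s) reported above are all driven by plain configuration. A minimal sketch of the corresponding keys; the memstore numbers are derived from heap size at runtime, so the fraction settings are shown rather than absolute bytes, and the values here only echo this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: settings behind the WAL provider, global memstore limits
// and compaction throughput bounds the region server logged above.
public class RegionServerTuningSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem"); // FSHLogProvider, as logged
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4F); // fraction of heap (~880 MB here)
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95F); // low mark (~836 MB)
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}
```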
2024-11-14T03:55:53,114 INFO [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T03:55:53,115 INFO [RS:1;6a989be77d39:36173 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T03:55:53,115 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:53,115 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,115 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,115 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,115 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,115 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,115 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:55:53,115 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,115 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,115 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,116 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,116 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,116 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:55:53,116 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:55:53,116 DEBUG [RS:1;6a989be77d39:36173 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:55:53,121 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T03:55:53,121 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:53,121 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:53,121 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:53,121 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:53,121 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,36173,1731556553047-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:55:53,138 INFO [RS:1;6a989be77d39:36173 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T03:55:53,138 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,36173,1731556553047-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:53,138 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:53,138 INFO [RS:1;6a989be77d39:36173 {}] regionserver.Replication(171): 6a989be77d39,36173,1731556553047 started 2024-11-14T03:55:53,154 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:55:53,154 INFO [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(1482): Serving as 6a989be77d39,36173,1731556553047, RpcServer on 6a989be77d39/172.17.0.2:36173, sessionid=0x100ef6320fb0002 2024-11-14T03:55:53,154 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T03:55:53,154 DEBUG [RS:1;6a989be77d39:36173 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6a989be77d39,36173,1731556553047 2024-11-14T03:55:53,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;6a989be77d39:36173,5,FailOnTimeoutGroup] 2024-11-14T03:55:53,154 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,36173,1731556553047' 2024-11-14T03:55:53,154 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T03:55:53,155 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-14T03:55:53,155 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T03:55:53,155 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T03:55:53,156 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T03:55:53,156 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T03:55:53,156 DEBUG [RS:1;6a989be77d39:36173 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
6a989be77d39,36173,1731556553047 2024-11-14T03:55:53,156 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,36173,1731556553047' 2024-11-14T03:55:53,156 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T03:55:53,156 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 6a989be77d39,44171,1731556551778 2024-11-14T03:55:53,156 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@77c8c1f0 2024-11-14T03:55:53,156 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T03:55:53,157 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T03:55:53,157 DEBUG [RS:1;6a989be77d39:36173 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T03:55:53,157 INFO [RS:1;6a989be77d39:36173 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T03:55:53,157 INFO [RS:1;6a989be77d39:36173 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T03:55:53,159 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46054, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T03:55:53,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44171 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T03:55:53,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44171 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
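[editor's note] The two TableDescriptorChecker warnings above fire because the test shrinks the region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes, log rolls and splits happen quickly. A minimal sketch of the configuration keys those warnings refer to, with the values copied from the warnings; they are deliberately unsuitable for production.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the deliberately tiny limits that triggered the
// TableDescriptorChecker warnings above.
public class TinyRegionLimitsSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786432L);       // MAX_FILESIZE from the warning
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // MEMSTORE_FLUSHSIZE from the warning
    return conf;
  }
}
```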
2024-11-14T03:55:53,160 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44171 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:55:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44171 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T03:55:53,163 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T03:55:53,164 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:53,164 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44171 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-14T03:55:53,165 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T03:55:53,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44171 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T03:55:53,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33561 is added to blk_1073741835_1011 (size=393) 2024-11-14T03:55:53,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45391 is added to blk_1073741835_1011 (size=393) 2024-11-14T03:55:53,182 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 00f2c2bd2dc7930772775ee3b7c158f2, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65 2024-11-14T03:55:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33561 is added to blk_1073741836_1012 (size=76) 2024-11-14T03:55:53,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45391 is added to blk_1073741836_1012 (size=76) 2024-11-14T03:55:53,259 INFO 
[RS:1;6a989be77d39:36173 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C36173%2C1731556553047, suffix=, logDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047, archiveDir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs, maxLogs=32 2024-11-14T03:55:53,260 INFO [RS:1;6a989be77d39:36173 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C36173%2C1731556553047.1731556553260 2024-11-14T03:55:53,270 INFO [RS:1;6a989be77d39:36173 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 2024-11-14T03:55:53,275 DEBUG [RS:1;6a989be77d39:36173 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45965:45965),(127.0.0.1/127.0.0.1:38925:38925)] 2024-11-14T03:55:53,594 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:53,594 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 00f2c2bd2dc7930772775ee3b7c158f2, disabling compactions & flushes 2024-11-14T03:55:53,594 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:55:53,594 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:55:53,595 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. after waiting 0 ms 2024-11-14T03:55:53,595 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:55:53,595 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 
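[editor's note] The pid=4 CreateTableProcedure above (pre-operation, write FS layout, add to meta, assign) is driven by a single client request. A minimal sketch of the Admin call it corresponds to, with the column-family attributes (VERSIONS=1, BLOOMFILTER=ROW) mirroring the create statement logged earlier; whether the test builds its descriptor exactly this way is an assumption.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: the client-side request behind the CreateTableProcedure
// logged above (family attributes mirror the logged descriptor).
public class CreateTableSketch {
  public static void create(Admin admin) throws Exception {
    admin.createTable(TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build());
  }
}
```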
2024-11-14T03:55:53,595 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 00f2c2bd2dc7930772775ee3b7c158f2: Waiting for close lock at 1731556553594Disabling compacts and flushes for region at 1731556553594Disabling writes for close at 1731556553595 (+1 ms)Writing region close event to WAL at 1731556553595Closed at 1731556553595 2024-11-14T03:55:53,597 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T03:55:53,598 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731556553597"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731556553597"}]},"ts":"1731556553597"} 2024-11-14T03:55:53,603 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-14T03:55:53,605 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T03:55:53,605 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556553605"}]},"ts":"1731556553605"} 2024-11-14T03:55:53,608 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-14T03:55:53,612 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {6a989be77d39=0} racks are {/default-rack=0} 2024-11-14T03:55:53,619 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-14T03:55:53,619 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-14T03:55:53,619 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-14T03:55:53,619 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-14T03:55:53,619 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-14T03:55:53,619 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-14T03:55:53,619 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-14T03:55:53,621 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00f2c2bd2dc7930772775ee3b7c158f2, ASSIGN}] 2024-11-14T03:55:53,623 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00f2c2bd2dc7930772775ee3b7c158f2, ASSIGN 2024-11-14T03:55:53,625 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00f2c2bd2dc7930772775ee3b7c158f2, ASSIGN; state=OFFLINE, location=6a989be77d39,46431,1731556551881; forceNewPlan=false, retain=false 2024-11-14T03:55:53,776 INFO [6a989be77d39:44171 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-14T03:55:53,776 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=00f2c2bd2dc7930772775ee3b7c158f2, regionState=OPENING, regionLocation=6a989be77d39,46431,1731556551881 2024-11-14T03:55:53,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00f2c2bd2dc7930772775ee3b7c158f2, ASSIGN because future has completed 2024-11-14T03:55:53,780 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 00f2c2bd2dc7930772775ee3b7c158f2, server=6a989be77d39,46431,1731556551881}] 2024-11-14T03:55:53,938 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:55:53,938 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 00f2c2bd2dc7930772775ee3b7c158f2, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:55:53,939 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,939 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:55:53,939 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,940 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,941 INFO [StoreOpener-00f2c2bd2dc7930772775ee3b7c158f2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,943 INFO [StoreOpener-00f2c2bd2dc7930772775ee3b7c158f2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 00f2c2bd2dc7930772775ee3b7c158f2 columnFamilyName info 2024-11-14T03:55:53,943 DEBUG [StoreOpener-00f2c2bd2dc7930772775ee3b7c158f2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:55:53,944 INFO [StoreOpener-00f2c2bd2dc7930772775ee3b7c158f2-1 {}] regionserver.HStore(327): Store=00f2c2bd2dc7930772775ee3b7c158f2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:55:53,944 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,945 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,945 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,945 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,946 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,948 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,953 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:55:53,954 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 00f2c2bd2dc7930772775ee3b7c158f2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858693, jitterRate=0.09188464283943176}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T03:55:53,954 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:55:53,955 DEBUG 
[RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 00f2c2bd2dc7930772775ee3b7c158f2: Running coprocessor pre-open hook at 1731556553940Writing region info on filesystem at 1731556553940Initializing all the Stores at 1731556553941 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556553941Cleaning up temporary data from old regions at 1731556553946 (+5 ms)Running coprocessor post-open hooks at 1731556553954 (+8 ms)Region opened successfully at 1731556553955 (+1 ms) 2024-11-14T03:55:53,956 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2., pid=6, masterSystemTime=1731556553933 2024-11-14T03:55:53,960 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:55:53,960 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:55:53,961 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=00f2c2bd2dc7930772775ee3b7c158f2, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,46431,1731556551881 2024-11-14T03:55:53,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 00f2c2bd2dc7930772775ee3b7c158f2, server=6a989be77d39,46431,1731556551881 because future has completed 2024-11-14T03:55:53,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T03:55:53,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 00f2c2bd2dc7930772775ee3b7c158f2, server=6a989be77d39,46431,1731556551881 in 195 msec 2024-11-14T03:55:53,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T03:55:53,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=00f2c2bd2dc7930772775ee3b7c158f2, ASSIGN in 359 msec 2024-11-14T03:55:53,983 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T03:55:53,984 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556553984"}]},"ts":"1731556553984"} 
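[editor's note] Once the assignment above completes and hbase:meta is updated with regionState=OPEN, a client can resolve where the region landed. A minimal sketch, assuming an open Connection to this cluster:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

// Illustrative only: resolve the location of the newly assigned region.
public class LocateRegionSketch {
  public static void locate(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Expect a single region hosted on one of the two region servers.
        System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
      }
    }
  }
}
```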
2024-11-14T03:55:53,987 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-14T03:55:53,989 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T03:55:53,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 830 msec 2024-11-14T03:55:58,350 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:55:58,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:58,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:58,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:58,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:55:58,388 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-14T03:56:00,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T03:56:00,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T03:56:00,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T03:56:00,875 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-14T03:56:00,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:56:00,876 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T03:56:00,877 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T03:56:00,877 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a 
single HBase RegionServer 2024-11-14T03:56:03,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44171 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T03:56:03,202 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-14T03:56:03,202 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-14T03:56:03,206 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T03:56:03,206 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:56:03,233 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:03,240 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:03,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:03,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:03,246 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:56:03,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@608d2530{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:03,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2488e9f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:03,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21e18edd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/java.io.tmpdir/jetty-localhost-46213-hadoop-hdfs-3_4_1-tests_jar-_-any-720360336618490129/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:03,372 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65266587{HTTP/1.1, (http/1.1)}{localhost:46213} 2024-11-14T03:56:03,372 INFO [Time-limited test {}] server.Server(415): Started @119384ms 2024-11-14T03:56:03,373 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
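[editor's note] With the CREATE operation reported complete and the single region located, the test can start writing to the table (which is what ultimately drives WAL rolling). A minimal put/get round trip is sketched below; the row key, qualifier and value are made up for illustration and are not taken from the test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: a put/get against the freshly created test table.
public class PutGetSketch {
  public static void roundTrip(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Table table = conn.getTable(tn)) {
      table.put(new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      Result r = table.get(new Get(Bytes.toBytes("row-0")));
      System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"))));
    }
  }
}
```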
2024-11-14T03:56:03,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:03,416 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:03,417 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:03,417 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:03,417 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:56:03,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53c1440a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:03,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f24e79d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:03,471 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data5/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:03,472 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data6/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:03,491 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:56:03,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x517b3d990ad7e345 with lease ID 0xe1192eedb63a5645: Processing first storage report for DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138 from datanode DatanodeRegistration(127.0.0.1:44499, datanodeUuid=a043d1df-4c47-430d-8d2f-53005218ff3a, infoPort=33651, infoSecurePort=0, ipcPort=45495, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:56:03,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x517b3d990ad7e345 with lease ID 0xe1192eedb63a5645: from storage DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138 node DatanodeRegistration(127.0.0.1:44499, datanodeUuid=a043d1df-4c47-430d-8d2f-53005218ff3a, infoPort=33651, infoSecurePort=0, ipcPort=45495, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:03,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x517b3d990ad7e345 with lease ID 0xe1192eedb63a5645: Processing first storage report for DS-1acb5661-f751-497a-be17-9ff13d87c1f4 from datanode DatanodeRegistration(127.0.0.1:44499, datanodeUuid=a043d1df-4c47-430d-8d2f-53005218ff3a, infoPort=33651, infoSecurePort=0, ipcPort=45495, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:56:03,495 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x517b3d990ad7e345 with lease ID 0xe1192eedb63a5645: from storage DS-1acb5661-f751-497a-be17-9ff13d87c1f4 node DatanodeRegistration(127.0.0.1:44499, datanodeUuid=a043d1df-4c47-430d-8d2f-53005218ff3a, infoPort=33651, infoSecurePort=0, ipcPort=45495, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:03,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@769ad76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/java.io.tmpdir/jetty-localhost-36183-hadoop-hdfs-3_4_1-tests_jar-_-any-4326439705151617065/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:03,539 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@458e5bcf{HTTP/1.1, (http/1.1)}{localhost:36183} 2024-11-14T03:56:03,539 INFO [Time-limited test {}] server.Server(415): Started @119551ms 2024-11-14T03:56:03,540 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:56:03,579 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:03,583 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:03,584 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:03,584 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:03,584 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:56:03,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c5f7ee0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:03,586 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@325d7ee2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:03,631 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data7/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:03,631 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data8/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:03,656 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:56:03,659 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f0f975941b2d710 with lease ID 0xe1192eedb63a5646: Processing first storage report for DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd from datanode DatanodeRegistration(127.0.0.1:34633, datanodeUuid=d6e9f60f-fd56-4dd8-9005-6e0db52fcd60, infoPort=40537, infoSecurePort=0, ipcPort=39739, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:56:03,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f0f975941b2d710 with lease ID 0xe1192eedb63a5646: from storage DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd node DatanodeRegistration(127.0.0.1:34633, datanodeUuid=d6e9f60f-fd56-4dd8-9005-6e0db52fcd60, infoPort=40537, infoSecurePort=0, ipcPort=39739, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:03,659 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f0f975941b2d710 with lease ID 0xe1192eedb63a5646: Processing first storage report for DS-30d4165e-fb8a-4689-8802-6b9b83b88c79 from datanode DatanodeRegistration(127.0.0.1:34633, datanodeUuid=d6e9f60f-fd56-4dd8-9005-6e0db52fcd60, infoPort=40537, infoSecurePort=0, ipcPort=39739, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:56:03,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f0f975941b2d710 with lease ID 0xe1192eedb63a5646: from storage DS-30d4165e-fb8a-4689-8802-6b9b83b88c79 node DatanodeRegistration(127.0.0.1:34633, datanodeUuid=d6e9f60f-fd56-4dd8-9005-6e0db52fcd60, infoPort=40537, infoSecurePort=0, ipcPort=39739, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:03,719 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a9d9bc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/java.io.tmpdir/jetty-localhost-37717-hadoop-hdfs-3_4_1-tests_jar-_-any-18040858405145580957/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:03,720 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47d01054{HTTP/1.1, (http/1.1)}{localhost:37717} 2024-11-14T03:56:03,720 INFO [Time-limited test {}] server.Server(415): Started @119732ms 2024-11-14T03:56:03,721 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T03:56:03,803 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:03,803 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9/current/BP-1399754620-172.17.0.2-1731556550838/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:03,827 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T03:56:03,829 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91f720f1dd2c3ba9 with lease ID 0xe1192eedb63a5647: Processing first storage report for DS-29e91c96-9e41-442a-83df-1c2172864279 from datanode DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:56:03,829 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91f720f1dd2c3ba9 with lease ID 0xe1192eedb63a5647: from storage DS-29e91c96-9e41-442a-83df-1c2172864279 node DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:03,829 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91f720f1dd2c3ba9 with lease ID 0xe1192eedb63a5647: Processing first storage report for DS-9a58de86-883c-4af5-bf55-b150c86e689d from datanode DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838) 2024-11-14T03:56:03,830 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91f720f1dd2c3ba9 with lease ID 0xe1192eedb63a5647: from storage DS-9a58de86-883c-4af5-bf55-b150c86e689d node DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:03,844 WARN [ResponseProcessor for block BP-1399754620-172.17.0.2-1731556550838:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1399754620-172.17.0.2-1731556550838:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,844 WARN [ResponseProcessor for block BP-1399754620-172.17.0.2-1731556550838:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1399754620-172.17.0.2-1731556550838:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,845 WARN [ResponseProcessor for block BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,845 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta block BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK], DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 2024-11-14T03:56:03,845 WARN [ResponseProcessor for block BP-1399754620-172.17.0.2-1731556550838:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1399754620-172.17.0.2-1731556550838:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,845 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 block BP-1399754620-172.17.0.2-1731556550838:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 
2024-11-14T03:56:03,845 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 block BP-1399754620-172.17.0.2-1731556550838:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 2024-11-14T03:56:03,845 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556552065 block BP-1399754620-172.17.0.2-1731556550838:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 2024-11-14T03:56:03,846 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-692897457_22 at /127.0.0.1:35886 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45391:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35886 dst: /127.0.0.1:45391 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:03,847 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-692897457_22 at /127.0.0.1:51282 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51282 dst: /127.0.0.1:33561 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:03,847 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_610903300_22 at /127.0.0.1:51208 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51208 dst: /127.0.0.1:33561 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:03,847 WARN [PacketResponder: BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45391] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:03,847 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35846 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45391:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35846 dst: /127.0.0.1:45391 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:03,848 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:51238 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51238 dst: /127.0.0.1:33561 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:03,848 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:51252 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51252 dst: /127.0.0.1:33561 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:03,848 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_610903300_22 at /127.0.0.1:35826 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45391:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35826 dst: /127.0.0.1:45391 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T03:56:03,848 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35858 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45391:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35858 dst: /127.0.0.1:45391 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T03:56:03,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57f0db4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:03,850 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78ab676e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:03,850 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:03,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75de815b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:03,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28707c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:03,854 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T03:56:03,854 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:56:03,855 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:56:03,855 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1399754620-172.17.0.2-1731556550838 (Datanode Uuid 9afe9bd4-33a6-4808-b0e1-7de0b2745446) service to localhost/127.0.0.1:43619 2024-11-14T03:56:03,856 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data3/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:03,856 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data4/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:03,856 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:56:03,857 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 block BP-1399754620-172.17.0.2-1731556550838:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,858 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556552065 block BP-1399754620-172.17.0.2-1731556550838:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:03,858 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 block BP-1399754620-172.17.0.2-1731556550838:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,863 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@f1ad147 {}] datanode.DataXceiver(331): 127.0.0.1:33561:DataXceiver error processing unknown operation src: /127.0.0.1:39468 dst: /127.0.0.1:33561 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:03,864 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta block BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bd89867{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:03,874 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1aa07d80{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:03,875 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:03,875 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53463a69{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:03,875 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@307e6985{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:03,876 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:56:03,876 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:56:03,877 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1399754620-172.17.0.2-1731556550838 (Datanode Uuid e3c0dcc4-5c7f-4d8c-b3bb-e265a0f253d0) service to localhost/127.0.0.1:43619 2024-11-14T03:56:03,877 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:56:03,877 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data1/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:03,877 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data2/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:03,878 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:56:03,882 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2., hostname=6a989be77d39,46431,1731556551881, seqNum=2] 2024-11-14T03:56:03,884 ERROR [FSHLog-0-hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65-prefix:6a989be77d39,46431,1731556551881 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,884 WARN [FSHLog-0-hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65-prefix:6a989be77d39,46431,1731556551881 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,884 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,884 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C46431%2C1731556551881:(num 1731556552400) roll requested 2024-11-14T03:56:03,885 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C46431%2C1731556551881.1731556563885 2024-11-14T03:56:03,894 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:03,894 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:03,894 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:03,894 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:03,895 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:03,895 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556563885 2024-11-14T03:56:03,898 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,898 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:03,898 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33651:33651),(127.0.0.1/127.0.0.1:40537:40537)] 2024-11-14T03:56:03,898 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 is not closed yet, will try archiving it next time 2024-11-14T03:56:03,899 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-14T03:56:03,899 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-14T03:56:03,900 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 2024-11-14T03:56:03,903 WARN [IPC Server handler 1 on default port 43619 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-14T03:56:03,906 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 after 5ms 2024-11-14T03:56:04,027 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:05,118 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:05,898 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:05,900 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556563885 2024-11-14T03:56:05,901 WARN [ResponseProcessor for block BP-1399754620-172.17.0.2-1731556550838:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1399754620-172.17.0.2-1731556550838:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:05,901 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556563885 block BP-1399754620-172.17.0.2-1731556550838:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 2024-11-14T03:56:05,902 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:43016 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43016 dst: /127.0.0.1:44499 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:05,903 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:41672 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41672 dst: /127.0.0.1:34633 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T03:56:05,904 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21e18edd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:05,905 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65266587{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:05,905 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:05,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2488e9f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:05,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@608d2530{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:05,909 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:56:05,909 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T03:56:05,909 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1399754620-172.17.0.2-1731556550838 (Datanode Uuid a043d1df-4c47-430d-8d2f-53005218ff3a) service to localhost/127.0.0.1:43619 2024-11-14T03:56:05,909 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:56:05,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data5/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:05,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data6/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:05,910 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:56:06,027 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:07,119 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:07,899 WARN [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]] 2024-11-14T03:56:07,899 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:07,900 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C46431%2C1731556551881:(num 1731556563885) roll requested 2024-11-14T03:56:07,900 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C46431%2C1731556551881.1731556567900 2024-11-14T03:56:07,907 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33561 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:07,907 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]) is bad. 2024-11-14T03:56:07,907 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741839_1021 2024-11-14T03:56:07,908 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 after 4008ms 2024-11-14T03:56:07,910 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35800 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10]'}, localName='127.0.0.1:37291', datanodeUuid='39a6d0c1-203e-4807-bb4e-95c3fb4af9c2', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741839_1021 to mirror 127.0.0.1:33561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:07,910 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35800 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-14T03:56:07,911 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK] 2024-11-14T03:56:07,911 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35800 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35800 dst: /127.0.0.1:37291 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:07,914 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:07,915 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK], DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 
2024-11-14T03:56:07,915 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741840_1022 2024-11-14T03:56:07,915 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK] 2024-11-14T03:56:07,916 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T03:56:07,927 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:07,927 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:07,927 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:07,927 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:07,927 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:07,927 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556563885 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556567900 2024-11-14T03:56:07,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34633 is added to blk_1073741838_1020 (size=3600) 2024-11-14T03:56:07,941 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34991:34991),(127.0.0.1/127.0.0.1:40537:40537)] 2024-11-14T03:56:07,941 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 is not closed yet, will try archiving it next time 2024-11-14T03:56:07,941 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556563885 is not closed yet, will try archiving it next time 2024-11-14T03:56:08,028 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:08,332 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 is not closed yet, will try archiving it next time 2024-11-14T03:56:09,119 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:09,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741838_1020 (size=3600) 2024-11-14T03:56:09,925 WARN [ResponseProcessor for block BP-1399754620-172.17.0.2-1731556550838:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1399754620-172.17.0.2-1731556550838:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-1399754620-172.17.0.2-1731556550838:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:09,925 WARN [DataStreamer for file /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556567900 block BP-1399754620-172.17.0.2-1731556550838:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:09,925 WARN [PacketResponder: BP-1399754620-172.17.0.2-1731556550838:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34633] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:09,926 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35802 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35802 dst: /127.0.0.1:37291 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:09,926 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:41702 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:34633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41702 dst: /127.0.0.1:34633 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:09,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@769ad76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:09,928 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@458e5bcf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:09,928 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:09,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f24e79d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:09,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53c1440a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:09,929 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:56:09,929 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1399754620-172.17.0.2-1731556550838 (Datanode Uuid d6e9f60f-fd56-4dd8-9005-6e0db52fcd60) service to localhost/127.0.0.1:43619 2024-11-14T03:56:09,929 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:56:09,929 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:56:09,930 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data7/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:09,930 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data8/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:09,930 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:56:09,942 WARN [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]] 2024-11-14T03:56:09,942 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:09,942 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C46431%2C1731556551881:(num 1731556567900) roll requested 2024-11-14T03:56:09,942 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C46431%2C1731556551881.1731556569942 2024-11-14T03:56:09,946 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:09,946 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 2024-11-14T03:56:09,946 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741842_1025 2024-11-14T03:56:09,946 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK] 2024-11-14T03:56:09,948 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:09,948 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 2024-11-14T03:56:09,948 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741843_1026 2024-11-14T03:56:09,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46431 {}] regionserver.HRegion(8855): Flush requested on 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:56:09,949 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK] 2024-11-14T03:56:09,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00f2c2bd2dc7930772775ee3b7c158f2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:56:09,951 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:09,951 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]) is bad. 2024-11-14T03:56:09,951 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741844_1027 2024-11-14T03:56:09,952 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK] 2024-11-14T03:56:09,954 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:09,954 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 
2024-11-14T03:56:09,954 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741845_1028 2024-11-14T03:56:09,956 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:09,958 WARN [IPC Server handler 3 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T03:56:09,958 WARN [IPC Server handler 3 on default port 43619 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T03:56:09,958 WARN [IPC Server handler 3 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T03:56:09,964 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:09,964 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:09,965 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:09,965 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:09,965 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:09,965 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556567900 with entries=9, filesize=9.53 KB; new WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556569942 2024-11-14T03:56:09,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741841_1024 (size=9768) 2024-11-14T03:56:09,969 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34991:34991)] 2024-11-14T03:56:09,969 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 is not closed yet, will try archiving it next time 2024-11-14T03:56:09,969 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556567900 is not closed yet, 
will try archiving it next time 2024-11-14T03:56:09,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/d6f8b2a727d94263820898ae81307548 is 1080, key is row0002/info:/1731556565912/Put/seqid=0 2024-11-14T03:56:09,975 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:09,976 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 2024-11-14T03:56:09,976 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741847_1030 2024-11-14T03:56:09,976 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK] 2024-11-14T03:56:09,979 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33561 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:09,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35836 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10]'}, localName='127.0.0.1:37291', datanodeUuid='39a6d0c1-203e-4807-bb4e-95c3fb4af9c2', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741848_1031 to mirror 127.0.0.1:33561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:09,979 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]) is bad. 2024-11-14T03:56:09,979 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741848_1031 2024-11-14T03:56:09,979 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35836 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T03:56:09,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35836 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35836 dst: /127.0.0.1:37291 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:09,980 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK] 2024-11-14T03:56:09,983 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44499 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:09,982 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35842 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10]'}, localName='127.0.0.1:37291', datanodeUuid='39a6d0c1-203e-4807-bb4e-95c3fb4af9c2', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741849_1032 to mirror 127.0.0.1:44499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T03:56:09,983 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 2024-11-14T03:56:09,983 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741849_1032 2024-11-14T03:56:09,983 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35842 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T03:56:09,983 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35842 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35842 dst: /127.0.0.1:37291 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:09,983 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK] 2024-11-14T03:56:09,985 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:09,985 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:09,985 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741850_1033 2024-11-14T03:56:09,985 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:09,986 WARN [IPC Server handler 0 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T03:56:09,986 WARN [IPC Server handler 0 on default port 43619 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T03:56:09,986 WARN [IPC Server handler 0 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T03:56:09,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741851_1034 (size=10347) 2024-11-14T03:56:10,028 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:10,368 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 is not closed yet, will try archiving it next time 2024-11-14T03:56:10,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/d6f8b2a727d94263820898ae81307548 2024-11-14T03:56:10,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/d6f8b2a727d94263820898ae81307548 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/d6f8b2a727d94263820898ae81307548 2024-11-14T03:56:10,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/d6f8b2a727d94263820898ae81307548, entries=5, sequenceid=11, filesize=10.1 K 2024-11-14T03:56:10,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 00f2c2bd2dc7930772775ee3b7c158f2 in 457ms, sequenceid=11, compaction requested=false 2024-11-14T03:56:10,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00f2c2bd2dc7930772775ee3b7c158f2: 2024-11-14T03:56:10,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46431 {}] regionserver.HRegion(8855): Flush requested on 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:56:10,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00f2c2bd2dc7930772775ee3b7c158f2 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-14T03:56:10,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/9a859d0446b547afb78a12cf00f47b8d is 1080, key is row0007/info:/1731556569950/Put/seqid=0 2024-11-14T03:56:10,594 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:10,594 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 2024-11-14T03:56:10,594 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741852_1035 2024-11-14T03:56:10,595 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK] 2024-11-14T03:56:10,596 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:10,596 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 
2024-11-14T03:56:10,596 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741853_1036 2024-11-14T03:56:10,597 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK] 2024-11-14T03:56:10,599 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35878 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10]'}, localName='127.0.0.1:37291', datanodeUuid='39a6d0c1-203e-4807-bb4e-95c3fb4af9c2', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741854_1037 to mirror 127.0.0.1:33561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:10,599 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33561 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:10,599 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35878 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-14T03:56:10,599 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]) is bad. 2024-11-14T03:56:10,600 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741854_1037 2024-11-14T03:56:10,600 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35878 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35878 dst: /127.0.0.1:37291 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:10,600 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK] 2024-11-14T03:56:10,602 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34633 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:10,602 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35882 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10]'}, localName='127.0.0.1:37291', datanodeUuid='39a6d0c1-203e-4807-bb4e-95c3fb4af9c2', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741855_1038 to mirror 127.0.0.1:34633 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:10,602 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:10,602 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741855_1038 2024-11-14T03:56:10,602 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35882 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T03:56:10,602 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:35882 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35882 dst: /127.0.0.1:37291 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:10,603 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:10,603 WARN [IPC Server handler 4 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T03:56:10,603 WARN [IPC Server handler 4 on default port 43619 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T03:56:10,603 WARN [IPC Server handler 4 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T03:56:10,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741856_1039 (size=12506) 2024-11-14T03:56:11,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/9a859d0446b547afb78a12cf00f47b8d 2024-11-14T03:56:11,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/9a859d0446b547afb78a12cf00f47b8d as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/9a859d0446b547afb78a12cf00f47b8d 2024-11-14T03:56:11,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/9a859d0446b547afb78a12cf00f47b8d, entries=7, sequenceid=24, filesize=12.2 K 2024-11-14T03:56:11,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 00f2c2bd2dc7930772775ee3b7c158f2 in 437ms, sequenceid=24, compaction requested=false 2024-11-14T03:56:11,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00f2c2bd2dc7930772775ee3b7c158f2: 2024-11-14T03:56:11,024 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-14T03:56:11,024 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:11,024 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/9a859d0446b547afb78a12cf00f47b8d because midkey is the same as first or last row 2024-11-14T03:56:11,119 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:11,970 WARN [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]] 2024-11-14T03:56:11,970 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:11,970 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C46431%2C1731556551881:(num 1731556569942) roll requested 2024-11-14T03:56:11,970 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C46431%2C1731556551881.1731556571970 2024-11-14T03:56:11,974 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:11,974 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]) is bad. 2024-11-14T03:56:11,974 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741857_1040 2024-11-14T03:56:11,974 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK] 2024-11-14T03:56:11,976 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:11,976 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK], DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:11,976 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741858_1041 2024-11-14T03:56:11,976 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:11,978 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:11,978 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 2024-11-14T03:56:11,978 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741859_1042 2024-11-14T03:56:11,978 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK] 2024-11-14T03:56:11,980 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:11,980 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 2024-11-14T03:56:11,980 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741860_1043 2024-11-14T03:56:11,981 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK] 2024-11-14T03:56:11,981 WARN [IPC Server handler 2 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T03:56:11,982 WARN [IPC Server handler 2 on default port 43619 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T03:56:11,982 WARN [IPC Server handler 2 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T03:56:11,995 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:11,995 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:11,995 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:11,995 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:11,995 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:11,996 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556569942 with entries=15, filesize=14.79 KB; new WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556571970 2024-11-14T03:56:11,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741846_1029 (size=15148) 2024-11-14T03:56:11,998 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): 
hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 is not closed yet, will try archiving it next time 2024-11-14T03:56:11,999 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556563885 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs/6a989be77d39%2C46431%2C1731556551881.1731556563885 2024-11-14T03:56:12,000 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34991:34991)] 2024-11-14T03:56:12,000 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 is not closed yet, will try archiving it next time 2024-11-14T03:56:12,001 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556567900 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs/6a989be77d39%2C46431%2C1731556551881.1731556567900 2024-11-14T03:56:12,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46431 {}] regionserver.HRegion(8855): Flush requested on 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:56:12,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00f2c2bd2dc7930772775ee3b7c158f2 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T03:56:12,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/2bf88e08b98d44a9be114ab3a1666663 is 1079, key is tmprow/info:/1731556572005/Put/seqid=0 2024-11-14T03:56:12,013 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:12,014 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 2024-11-14T03:56:12,014 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741862_1045 2024-11-14T03:56:12,014 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK] 2024-11-14T03:56:12,018 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34633 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:12,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:38026 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10]'}, localName='127.0.0.1:37291', datanodeUuid='39a6d0c1-203e-4807-bb4e-95c3fb4af9c2', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741863_1046 to mirror 127.0.0.1:34633 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T03:56:12,018 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:12,018 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741863_1046 2024-11-14T03:56:12,018 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:38026 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T03:56:12,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:38026 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38026 dst: /127.0.0.1:37291 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:12,019 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:12,021 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44499 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:12,021 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:38030 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10]'}, localName='127.0.0.1:37291', datanodeUuid='39a6d0c1-203e-4807-bb4e-95c3fb4af9c2', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741864_1047 to mirror 127.0.0.1:44499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:12,022 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 2024-11-14T03:56:12,022 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741864_1047 2024-11-14T03:56:12,022 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:38030 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T03:56:12,022 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:38030 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38030 dst: /127.0.0.1:37291 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:12,022 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK] 2024-11-14T03:56:12,023 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:12,023 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]) is bad. 
2024-11-14T03:56:12,023 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741865_1048 2024-11-14T03:56:12,024 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK] 2024-11-14T03:56:12,025 WARN [IPC Server handler 3 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T03:56:12,025 WARN [IPC Server handler 3 on default port 43619 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T03:56:12,025 WARN [IPC Server handler 3 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T03:56:12,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741866_1049 (size=6027) 2024-11-14T03:56:12,028 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
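Editor's note: the BlockPlacementPolicyDefault warning above explicitly asks for DEBUG on two loggers before it will explain why replica placement failed. A minimal sketch of doing that programmatically, assuming the JVM is using Log4j 2 (the same could be done in the logging configuration file instead):

// Raise the two loggers named in the warning to DEBUG so placement decisions are logged.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class EnablePlacementDebug {
  public static void main(String[] args) {
    Configurator.setLevel("org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
    Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
  }
}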
2024-11-14T03:56:12,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/2bf88e08b98d44a9be114ab3a1666663 2024-11-14T03:56:12,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/2bf88e08b98d44a9be114ab3a1666663 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/2bf88e08b98d44a9be114ab3a1666663 2024-11-14T03:56:12,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/2bf88e08b98d44a9be114ab3a1666663, entries=1, sequenceid=34, filesize=5.9 K 2024-11-14T03:56:12,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 00f2c2bd2dc7930772775ee3b7c158f2 in 444ms, sequenceid=34, compaction requested=true 2024-11-14T03:56:12,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00f2c2bd2dc7930772775ee3b7c158f2: 2024-11-14T03:56:12,452 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-14T03:56:12,452 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:12,452 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/9a859d0446b547afb78a12cf00f47b8d because midkey is the same as first or last row 2024-11-14T03:56:12,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f2c2bd2dc7930772775ee3b7c158f2:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:56:12,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:56:12,453 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:56:12,456 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T03:56:12,456 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HStore(1541): 00f2c2bd2dc7930772775ee3b7c158f2/info is initiating minor compaction (all files) 2024-11-14T03:56:12,456 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
00f2c2bd2dc7930772775ee3b7c158f2/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:56:12,456 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/d6f8b2a727d94263820898ae81307548, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/9a859d0446b547afb78a12cf00f47b8d, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/2bf88e08b98d44a9be114ab3a1666663] into tmpdir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp, totalSize=28.2 K 2024-11-14T03:56:12,457 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.Compactor(225): Compacting d6f8b2a727d94263820898ae81307548, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731556565912 2024-11-14T03:56:12,457 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9a859d0446b547afb78a12cf00f47b8d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731556569950 2024-11-14T03:56:12,458 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2bf88e08b98d44a9be114ab3a1666663, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731556572005 2024-11-14T03:56:12,488 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f2c2bd2dc7930772775ee3b7c158f2#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:56:12,489 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/47ff91a97a3f437d96a69a29a08ec670 is 1080, key is row0002/info:/1731556565912/Put/seqid=0 2024-11-14T03:56:12,491 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:12,491 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK], DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]) is bad. 2024-11-14T03:56:12,491 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741867_1050 2024-11-14T03:56:12,492 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK] 2024-11-14T03:56:12,493 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:12,494 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 2024-11-14T03:56:12,494 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741868_1051 2024-11-14T03:56:12,494 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK] 2024-11-14T03:56:12,496 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:12,496 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK], DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:12,496 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741869_1052 2024-11-14T03:56:12,501 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:12,504 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:12,504 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 
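Editor's note, a hypothetical check rather than part of the test: the streamer above cycles through 127.0.0.1:33561, 44499, 34633 and 45391 and excludes each in turn, which is consistent with only 127.0.0.1:37291 still accepting connections. A quick way to confirm how many datanodes the NameNode considers live is the datanode report API; the port below comes from this log.

// List live datanodes as reported by the NameNode at the port used in this test.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class LiveDatanodeReport {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:43619");
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
        System.out.println(dn.getXferAddr());  // expected to show only 127.0.0.1:37291 here
      }
    }
  }
}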
2024-11-14T03:56:12,504 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741870_1053 2024-11-14T03:56:12,505 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK] 2024-11-14T03:56:12,505 WARN [IPC Server handler 2 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T03:56:12,506 WARN [IPC Server handler 2 on default port 43619 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T03:56:12,506 WARN [IPC Server handler 2 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T03:56:12,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741871_1054 (size=17994) 2024-11-14T03:56:12,527 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/47ff91a97a3f437d96a69a29a08ec670 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670 2024-11-14T03:56:12,545 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 00f2c2bd2dc7930772775ee3b7c158f2/info of 00f2c2bd2dc7930772775ee3b7c158f2 into 47ff91a97a3f437d96a69a29a08ec670(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
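Editor's note, a sketch under assumptions: the compaction above rewrote three store files (10.1 K + 12.2 K + 5.9 K) into a single 17.6 K file for 00f2c2bd2dc7930772775ee3b7c158f2/info. The snippet below shows how a client could request and watch an equivalent compaction through the public Admin API, using the table name from this test; connection details are whatever the local hbase-site.xml provides.

// Request a major compaction of the test table and poll until it has drained.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactAndWait {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table);                                  // queue the compaction
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);                                       // poll until it completes
      }
    }
  }
}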
2024-11-14T03:56:12,545 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 00f2c2bd2dc7930772775ee3b7c158f2: 2024-11-14T03:56:12,545 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2., storeName=00f2c2bd2dc7930772775ee3b7c158f2/info, priority=13, startTime=1731556572452; duration=0sec 2024-11-14T03:56:12,545 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T03:56:12,545 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:12,545 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670 because midkey is the same as first or last row 2024-11-14T03:56:12,546 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T03:56:12,546 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:12,546 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670 because midkey is the same as first or last row 2024-11-14T03:56:12,546 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T03:56:12,546 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:12,546 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670 because midkey is the same as first or last row 2024-11-14T03:56:12,546 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:56:12,546 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f2c2bd2dc7930772775ee3b7c158f2:info 2024-11-14T03:56:12,838 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5f075f60[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer 
BP-1399754620-172.17.0.2-1731556550838:blk_1073741841_1024 to 127.0.0.1:33561 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:12,840 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3360bc31[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer BP-1399754620-172.17.0.2-1731556550838:blk_1073741851_1034 to 127.0.0.1:44499 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:13,120 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
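Editor's note, a toy illustration and not HBase source code: the split checks logged above compare the summed store size against a sizeToCheck threshold (28.2 K and later 17.6 K against 16.0 K), yet the region is still not split because the chosen midkey equals the first or last row. The sizes and row keys below are taken from this log; everything else is illustrative.

// Reproduce the two conditions the split policy logged: size check plus midkey check.
public class SplitCheckIllustration {
  public static void main(String[] args) {
    long sumSize = 28_880;          // bytes of the 3 selected files (~28.2 K above)
    long sizeToCheck = 16 * 1024;   // 16.0 K threshold reported by the policy
    boolean bigEnough = sumSize > sizeToCheck;
    byte[] midKey = "row0002".getBytes();   // illustrative: first key seen in this log
    byte[] firstKey = "row0002".getBytes();
    byte[] lastKey = "tmprow".getBytes();
    boolean splittable = bigEnough
        && !java.util.Arrays.equals(midKey, firstKey)
        && !java.util.Arrays.equals(midKey, lastKey);
    System.out.println("bigEnough=" + bigEnough + ", splittable=" + splittable);
  }
}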
2024-11-14T03:56:13,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46431 {}] regionserver.HRegion(8855): Flush requested on 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:56:13,440 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00f2c2bd2dc7930772775ee3b7c158f2 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T03:56:13,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/1182dae58de64bd0b4135b73bd56f778 is 1079, key is tmprow/info:/1731556573439/Put/seqid=0 2024-11-14T03:56:13,448 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:13,448 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:13,448 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741872_1055 2024-11-14T03:56:13,449 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:13,451 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45391 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
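Editor's note, a sketch assuming the standard client API: the flush requested above on 00f2c2bd2dc7930772775ee3b7c158f2 is what produces the MemStoreFlusher activity that follows. The same effect can be requested explicitly for the whole table:

// Ask the cluster to flush the test table's memstores to new store files.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}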
2024-11-14T03:56:13,451 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:38064 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10]'}, localName='127.0.0.1:37291', datanodeUuid='39a6d0c1-203e-4807-bb4e-95c3fb4af9c2', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741873_1056 to mirror 127.0.0.1:45391 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:13,452 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]) is bad. 2024-11-14T03:56:13,452 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741873_1056 2024-11-14T03:56:13,452 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:38064 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T03:56:13,452 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:38064 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38064 dst: /127.0.0.1:37291 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:13,452 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45391,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK] 2024-11-14T03:56:13,453 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:13,454 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK], DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 2024-11-14T03:56:13,454 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741874_1057 2024-11-14T03:56:13,454 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK] 2024-11-14T03:56:13,455 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:13,456 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]) is bad. 2024-11-14T03:56:13,456 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741875_1058 2024-11-14T03:56:13,456 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK] 2024-11-14T03:56:13,457 WARN [IPC Server handler 1 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T03:56:13,457 WARN [IPC Server handler 1 on default port 43619 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T03:56:13,457 WARN [IPC Server handler 1 on default port 43619 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T03:56:13,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741876_1059 (size=6027) 2024-11-14T03:56:13,837 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5f075f60[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer BP-1399754620-172.17.0.2-1731556550838:blk_1073741846_1029 to 127.0.0.1:34633 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:13,837 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3360bc31[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer BP-1399754620-172.17.0.2-1731556550838:blk_1073741856_1039 to 127.0.0.1:44499 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:13,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/1182dae58de64bd0b4135b73bd56f778 2024-11-14T03:56:13,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/1182dae58de64bd0b4135b73bd56f778 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/1182dae58de64bd0b4135b73bd56f778 2024-11-14T03:56:13,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/1182dae58de64bd0b4135b73bd56f778, entries=1, sequenceid=45, filesize=5.9 K 2024-11-14T03:56:13,890 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 00f2c2bd2dc7930772775ee3b7c158f2 in 450ms, sequenceid=45, compaction requested=false 2024-11-14T03:56:13,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00f2c2bd2dc7930772775ee3b7c158f2: 2024-11-14T03:56:13,890 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-14T03:56:13,890 DEBUG 
[MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:13,890 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670 because midkey is the same as first or last row 2024-11-14T03:56:14,000 WARN [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-14T03:56:14,000 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:14,029 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:14,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:14,065 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:14,098 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:14,098 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:14,098 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:56:14,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21cc8d76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:14,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32cdb76b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:14,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@95c65e3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/java.io.tmpdir/jetty-localhost-40153-hadoop-hdfs-3_4_1-tests_jar-_-any-17761410200518114705/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:14,232 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2aaad1eb{HTTP/1.1, (http/1.1)}{localhost:40153} 2024-11-14T03:56:14,232 INFO [Time-limited test {}] server.Server(415): Started @130244ms 2024-11-14T03:56:14,233 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:56:14,391 WARN [Thread-977 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:56:14,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2f622274cf04790 with lease ID 0xe1192eedb63a5648: from storage DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa node DatanodeRegistration(127.0.0.1:44609, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=40141, infoSecurePort=0, ipcPort=33599, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:14,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2f622274cf04790 with lease ID 0xe1192eedb63a5648: from storage DS-66514651-f8c3-401d-a0f7-b9df137518b2 node DatanodeRegistration(127.0.0.1:44609, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=40141, infoSecurePort=0, ipcPort=33599, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:15,120 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:15,838 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3360bc31[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer BP-1399754620-172.17.0.2-1731556550838:blk_1073741866_1049 to 127.0.0.1:44499 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
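Editor's note: the DirectoryScanner warning above states that dfs.datanode.directoryscan.throttle.limit.ms.per.sec was set above 1000 ms/sec and was therefore ignored in favor of the default of -1 (unthrottled). A minimal sketch of setting the key to a value the scanner will honor; the key name is taken verbatim from the warning.

// Keep the throttle at or below 1000 ms of scan work per second so it is not reset to -1.
import org.apache.hadoop.conf.Configuration;

public class DirectoryScannerThrottle {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
    System.out.println(conf.getInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", -1));
  }
}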
2024-11-14T03:56:15,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741871_1054 (size=17994) 2024-11-14T03:56:16,001 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:16,029 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:16,838 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5f075f60[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37291, datanodeUuid=39a6d0c1-203e-4807-bb4e-95c3fb4af9c2, infoPort=34991, infoSecurePort=0, ipcPort=39347, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer BP-1399754620-172.17.0.2-1731556550838:blk_1073741876_1059 to 127.0.0.1:44499 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:17,121 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:18,001 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:18,030 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:19,121 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:20,002 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:20,030 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:21,122 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:21,758 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T03:56:22,002 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:22,031 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:22,154 ERROR [FSHLog-0-hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData-prefix:6a989be77d39,44171,1731556551778 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:22,154 WARN [FSHLog-0-hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData-prefix:6a989be77d39,44171,1731556551778 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:22,154 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C44171%2C1731556551778:(num 1731556552065) roll requested 2024-11-14T03:56:22,155 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C44171%2C1731556551778.1731556582154 2024-11-14T03:56:22,158 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:22,158 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK], DatanodeInfoWithStorage[127.0.0.1:44609,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:22,158 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741877_1060 2024-11-14T03:56:22,159 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:22,160 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:22,161 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK], DatanodeInfoWithStorage[127.0.0.1:44609,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]) is bad. 2024-11-14T03:56:22,161 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741878_1061 2024-11-14T03:56:22,161 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK] 2024-11-14T03:56:22,162 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:22,163 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK], DatanodeInfoWithStorage[127.0.0.1:44609,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK]) is bad. 2024-11-14T03:56:22,163 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741879_1062 2024-11-14T03:56:22,163 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44499,DS-1a5df2ca-bbf7-4b01-a4ad-55e1737ab138,DISK] 2024-11-14T03:56:22,167 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:22,167 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:22,167 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:22,167 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:22,168 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:22,168 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556552065 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556582154 2024-11-14T03:56:22,168 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:22,168 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:22,169 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556552065 2024-11-14T03:56:22,169 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40141:40141),(127.0.0.1/127.0.0.1:34991:34991)] 2024-11-14T03:56:22,169 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556552065 is not closed yet, will try archiving it next time 2024-11-14T03:56:22,169 WARN [IPC Server handler 4 on default port 43619 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556552065 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-11-14T03:56:22,169 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556552065 after 0ms 2024-11-14T03:56:23,122 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:24,002 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:24,446 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@31f66a22 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1399754620-172.17.0.2-1731556550838:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:33561,null,null]) java.net.ConnectException: Call From 6a989be77d39/172.17.0.2 to localhost:42993 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T03:56:24,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741833_1019 (size=455) 2024-11-14T03:56:24,938 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556552400 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs/6a989be77d39%2C46431%2C1731556551881.1731556552400 2024-11-14T03:56:24,939 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556569942 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs/6a989be77d39%2C46431%2C1731556551881.1731556569942 2024-11-14T03:56:25,122 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:26,003 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:26,171 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/WALs/6a989be77d39,44171,1731556551778/6a989be77d39%2C44171%2C1731556551778.1731556552065 after 4002ms 2024-11-14T03:56:27,123 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:27,420 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@53fc424a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44609, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=40141, infoSecurePort=0, ipcPort=33599, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer BP-1399754620-172.17.0.2-1731556550838:blk_1073741835_1011 to 127.0.0.1:34633 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:27,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741833_1019 (size=455) 2024-11-14T03:56:28,005 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:28,420 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@8ddaa9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44609, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=40141, infoSecurePort=0, ipcPort=33599, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer BP-1399754620-172.17.0.2-1731556550838:blk_1073741831_1007 to 127.0.0.1:34633 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:28,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:56:29,123 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:29,680 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C46431%2C1731556551881.1731556589680 2024-11-14T03:56:29,687 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:29,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:29,688 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:29,688 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:29,688 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:29,688 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556571970 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556589680 2024-11-14T03:56:29,689 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40141:40141),(127.0.0.1/127.0.0.1:34991:34991)] 2024-11-14T03:56:29,689 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556571970 is not closed yet, will try archiving it next time 2024-11-14T03:56:29,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741861_1044 (size=13591) 2024-11-14T03:56:29,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46431 {}] regionserver.HRegion(8855): Flush requested on 00f2c2bd2dc7930772775ee3b7c158f2 
2024-11-14T03:56:29,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00f2c2bd2dc7930772775ee3b7c158f2 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T03:56:29,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/6236885c651444868650f2d867386b11 is 1080, key is row0013/info:/1731556589691/Put/seqid=0 2024-11-14T03:56:29,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741882_1066 (size=11421) 2024-11-14T03:56:29,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741882_1066 (size=11421) 2024-11-14T03:56:29,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/6236885c651444868650f2d867386b11 2024-11-14T03:56:29,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/6236885c651444868650f2d867386b11 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/6236885c651444868650f2d867386b11 2024-11-14T03:56:29,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/6236885c651444868650f2d867386b11, entries=6, sequenceid=55, filesize=11.2 K 2024-11-14T03:56:29,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 00f2c2bd2dc7930772775ee3b7c158f2 in 34ms, sequenceid=55, compaction requested=true 2024-11-14T03:56:29,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00f2c2bd2dc7930772775ee3b7c158f2: 2024-11-14T03:56:29,744 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-14T03:56:29,744 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:29,744 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670 because midkey is the same as first or last row 2024-11-14T03:56:29,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f2c2bd2dc7930772775ee3b7c158f2:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:56:29,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small 
Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:56:29,744 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:56:29,745 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T03:56:29,746 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HStore(1541): 00f2c2bd2dc7930772775ee3b7c158f2/info is initiating minor compaction (all files) 2024-11-14T03:56:29,746 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 00f2c2bd2dc7930772775ee3b7c158f2/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:56:29,746 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/1182dae58de64bd0b4135b73bd56f778, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/6236885c651444868650f2d867386b11] into tmpdir=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp, totalSize=34.6 K 2024-11-14T03:56:29,746 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.Compactor(225): Compacting 47ff91a97a3f437d96a69a29a08ec670, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731556565912 2024-11-14T03:56:29,747 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1182dae58de64bd0b4135b73bd56f778, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731556573439 2024-11-14T03:56:29,747 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6236885c651444868650f2d867386b11, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731556573845 2024-11-14T03:56:29,766 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f2c2bd2dc7930772775ee3b7c158f2#info#compaction#24 average throughput is 5.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:56:29,767 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/3665767c498e4a828e61fb7d57da5b12 is 1080, key is row0002/info:/1731556565912/Put/seqid=0 2024-11-14T03:56:29,769 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:29,769 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:29,769 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741883_1067 2024-11-14T03:56:29,769 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:29,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741884_1068 (size=23502) 2024-11-14T03:56:29,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741884_1068 (size=23502) 2024-11-14T03:56:29,784 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/3665767c498e4a828e61fb7d57da5b12 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/3665767c498e4a828e61fb7d57da5b12 2024-11-14T03:56:29,792 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 00f2c2bd2dc7930772775ee3b7c158f2/info of 00f2c2bd2dc7930772775ee3b7c158f2 into 3665767c498e4a828e61fb7d57da5b12(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T03:56:29,792 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 00f2c2bd2dc7930772775ee3b7c158f2: 2024-11-14T03:56:29,792 INFO [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2., storeName=00f2c2bd2dc7930772775ee3b7c158f2/info, priority=13, startTime=1731556589744; duration=0sec 2024-11-14T03:56:29,792 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-14T03:56:29,792 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:29,792 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/3665767c498e4a828e61fb7d57da5b12 because midkey is the same as first or last row 2024-11-14T03:56:29,792 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-14T03:56:29,792 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:29,792 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/3665767c498e4a828e61fb7d57da5b12 because midkey is the same as first or last row 2024-11-14T03:56:29,793 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-14T03:56:29,793 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:29,793 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/3665767c498e4a828e61fb7d57da5b12 because midkey is the same as first or last row 2024-11-14T03:56:29,793 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:56:29,793 DEBUG [RS:0;6a989be77d39:46431-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f2c2bd2dc7930772775ee3b7c158f2:info 2024-11-14T03:56:29,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46431 {}] regionserver.HRegion(8855): Flush requested on 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:56:29,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 00f2c2bd2dc7930772775ee3b7c158f2 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T03:56:29,934 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/74e77e14ae994a3fa450b3d6df28f0e7 is 1080, key is row0018/info:/1731556589711/Put/seqid=0 2024-11-14T03:56:29,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741885_1069 (size=11421) 2024-11-14T03:56:29,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741885_1069 (size=11421) 2024-11-14T03:56:29,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/74e77e14ae994a3fa450b3d6df28f0e7 2024-11-14T03:56:29,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/.tmp/info/74e77e14ae994a3fa450b3d6df28f0e7 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/74e77e14ae994a3fa450b3d6df28f0e7 2024-11-14T03:56:29,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/74e77e14ae994a3fa450b3d6df28f0e7, entries=6, sequenceid=66, filesize=11.2 K 2024-11-14T03:56:29,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 00f2c2bd2dc7930772775ee3b7c158f2 in 31ms, sequenceid=66, compaction requested=false 2024-11-14T03:56:29,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 00f2c2bd2dc7930772775ee3b7c158f2: 2024-11-14T03:56:29,960 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-14T03:56:29,960 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:56:29,960 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/3665767c498e4a828e61fb7d57da5b12 because midkey is the same as first or last row 2024-11-14T03:56:30,006 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-14T03:56:30,006 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:30,091 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.1731556571970 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs/6a989be77d39%2C46431%2C1731556551881.1731556571970 2024-11-14T03:56:30,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T03:56:30,130 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T03:56:30,130 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:56:30,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:56:30,131 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:56:30,131 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T03:56:30,131 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T03:56:30,131 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1031313255, stopped=false 2024-11-14T03:56:30,131 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6a989be77d39,44171,1731556551778 2024-11-14T03:56:30,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:56:30,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:30,134 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:56:30,134 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T03:56:30,134 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:56:30,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:56:30,134 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '6a989be77d39,46431,1731556551881' ***** 2024-11-14T03:56:30,134 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T03:56:30,134 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6a989be77d39,36173,1731556553047' ***** 2024-11-14T03:56:30,135 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T03:56:30,135 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T03:56:30,135 INFO [RS:0;6a989be77d39:46431 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T03:56:30,135 INFO [RS:0;6a989be77d39:46431 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T03:56:30,135 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(3091): Received CLOSE for 00f2c2bd2dc7930772775ee3b7c158f2 2024-11-14T03:56:30,135 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T03:56:30,137 INFO [RS:1;6a989be77d39:36173 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T03:56:30,137 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(959): stopping server 6a989be77d39,46431,1731556551881 2024-11-14T03:56:30,137 INFO [RS:0;6a989be77d39:46431 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:56:30,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36173-0x100ef6320fb0002, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:56:30,137 INFO [RS:0;6a989be77d39:46431 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6a989be77d39:46431. 
2024-11-14T03:56:30,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36173-0x100ef6320fb0002, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:30,137 DEBUG [RS:0;6a989be77d39:46431 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:56:30,138 DEBUG [RS:0;6a989be77d39:46431 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:56:30,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:56:30,137 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 00f2c2bd2dc7930772775ee3b7c158f2, disabling compactions & flushes 2024-11-14T03:56:30,138 INFO [RS:0;6a989be77d39:46431 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T03:56:30,138 INFO [RS:0;6a989be77d39:46431 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T03:56:30,138 INFO [RS:0;6a989be77d39:46431 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T03:56:30,138 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:56:30,138 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T03:56:30,138 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:56:30,138 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 
after waiting 0 ms 2024-11-14T03:56:30,138 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:56:30,138 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T03:56:30,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:30,141 INFO [RS:1;6a989be77d39:36173 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T03:56:30,141 INFO [RS:1;6a989be77d39:36173 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T03:56:30,141 INFO [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(959): stopping server 6a989be77d39,36173,1731556553047 2024-11-14T03:56:30,141 INFO [RS:1;6a989be77d39:36173 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:56:30,142 INFO [RS:1;6a989be77d39:36173 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;6a989be77d39:36173. 2024-11-14T03:56:30,142 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:56:30,142 DEBUG [RS:1;6a989be77d39:36173 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:56:30,142 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:56:30,142 DEBUG [RS:1;6a989be77d39:36173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:56:30,142 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36173-0x100ef6320fb0002, quorum=127.0.0.1:64949, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/running 2024-11-14T03:56:30,143 INFO [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(976): stopping server 6a989be77d39,36173,1731556553047; all regions closed. 2024-11-14T03:56:30,146 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T03:56:30,146 DEBUG [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(1325): Online Regions={00f2c2bd2dc7930772775ee3b7c158f2=TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T03:56:30,146 DEBUG [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(1351): Waiting on 00f2c2bd2dc7930772775ee3b7c158f2, 1588230740 2024-11-14T03:56:30,145 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/d6f8b2a727d94263820898ae81307548, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/9a859d0446b547afb78a12cf00f47b8d, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/2bf88e08b98d44a9be114ab3a1666663, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/1182dae58de64bd0b4135b73bd56f778, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/6236885c651444868650f2d867386b11] to archive 2024-11-14T03:56:30,146 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:56:30,146 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:56:30,146 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:56:30,146 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:56:30,146 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:56:30,146 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-14T03:56:30,147 ERROR [FSHLog-0-hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65-prefix:6a989be77d39,46431,1731556551881.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:30,147 WARN [FSHLog-0-hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65-prefix:6a989be77d39,46431,1731556551881.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:30,147 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C46431%2C1731556551881.meta:.meta(num 1731556552817) roll requested 2024-11-14T03:56:30,147 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C46431%2C1731556551881.meta.1731556590147.meta 2024-11-14T03:56:30,148 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T03:56:30,149 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,149 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,149 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,150 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,150 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,150 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/d6f8b2a727d94263820898ae81307548 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/d6f8b2a727d94263820898ae81307548 2024-11-14T03:56:30,151 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:30,151 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:30,151 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 2024-11-14T03:56:30,152 WARN [IPC Server handler 4 on default port 43619 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 has not been closed. Lease recovery is in progress. 
RecoveryId = 1071 for block blk_1073741837_1013 2024-11-14T03:56:30,152 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 after 1ms 2024-11-14T03:56:30,153 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/9a859d0446b547afb78a12cf00f47b8d to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/9a859d0446b547afb78a12cf00f47b8d 2024-11-14T03:56:30,156 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/47ff91a97a3f437d96a69a29a08ec670 2024-11-14T03:56:30,157 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,157 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,157 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,157 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,157 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/2bf88e08b98d44a9be114ab3a1666663 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/2bf88e08b98d44a9be114ab3a1666663 2024-11-14T03:56:30,157 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,158 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556590147.meta 2024-11-14T03:56:30,158 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:30,158 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33561,DS-54151df9-487a-44ab-b5ce-19ad1242def2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:30,158 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta 2024-11-14T03:56:30,159 WARN [IPC Server handler 2 on default port 43619 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1072 for block blk_1073741834_1010 2024-11-14T03:56:30,159 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34991:34991),(127.0.0.1/127.0.0.1:40141:40141)] 2024-11-14T03:56:30,159 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta is not closed yet, will try archiving it next time 2024-11-14T03:56:30,159 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta after 1ms 2024-11-14T03:56:30,159 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/1182dae58de64bd0b4135b73bd56f778 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/1182dae58de64bd0b4135b73bd56f778 2024-11-14T03:56:30,161 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/6236885c651444868650f2d867386b11 to hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/info/6236885c651444868650f2d867386b11 2024-11-14T03:56:30,162 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6a989be77d39:44171 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-14T03:56:30,162 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d6f8b2a727d94263820898ae81307548=10347, 9a859d0446b547afb78a12cf00f47b8d=12506, 47ff91a97a3f437d96a69a29a08ec670=17994, 2bf88e08b98d44a9be114ab3a1666663=6027, 1182dae58de64bd0b4135b73bd56f778=6027, 6236885c651444868650f2d867386b11=11421] 2024-11-14T03:56:30,171 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/default/TestLogRolling-testLogRollOnDatanodeDeath/00f2c2bd2dc7930772775ee3b7c158f2/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-14T03:56:30,172 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:56:30,172 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 00f2c2bd2dc7930772775ee3b7c158f2: Waiting for close lock at 1731556590137Running coprocessor pre-close hooks at 1731556590137Disabling compacts and flushes for region at 1731556590137Disabling writes for close at 1731556590138 (+1 ms)Writing region close event to WAL at 1731556590163 (+25 ms)Running coprocessor post-close hooks at 1731556590172 (+9 ms)Closed at 1731556590172 2024-11-14T03:56:30,173 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2. 2024-11-14T03:56:30,179 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/.tmp/info/75783a8839ce41beb2be0c97f3a125ef is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731556553159.00f2c2bd2dc7930772775ee3b7c158f2./info:regioninfo/1731556553961/Put/seqid=0 2024-11-14T03:56:30,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:43972 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741887_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10]'}, localName='127.0.0.1:37291', datanodeUuid='39a6d0c1-203e-4807-bb4e-95c3fb4af9c2', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741887_1073 to mirror 127.0.0.1:34633 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:30,182 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34633 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:30,183 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:43972 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741887_1073] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T03:56:30,183 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741887_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37291,DS-29e91c96-9e41-442a-83df-1c2172864279,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:30,183 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741887_1073 2024-11-14T03:56:30,183 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:43972 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741887_1073] {}] datanode.DataXceiver(331): 127.0.0.1:37291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43972 dst: /127.0.0.1:37291 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:30,183 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:30,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741888_1074 (size=7089) 2024-11-14T03:56:30,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741888_1074 (size=7089) 2024-11-14T03:56:30,192 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/.tmp/info/75783a8839ce41beb2be0c97f3a125ef 2024-11-14T03:56:30,222 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/.tmp/ns/996b8277c6384863964e2e419bc0f9b3 is 43, key is default/ns:d/1731556552895/Put/seqid=0 2024-11-14T03:56:30,229 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34633 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:30,229 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:46972 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data4]'}, localName='127.0.0.1:44609', datanodeUuid='9afe9bd4-33a6-4808-b0e1-7de0b2745446', xmitsInProgress=0}:Exception transferring block BP-1399754620-172.17.0.2-1731556550838:blk_1073741889_1075 to mirror 127.0.0.1:34633 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:30,229 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1399754620-172.17.0.2-1731556550838:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44609,DS-6b54cd8c-e025-494c-8f69-89ab6d84a7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK]) is bad. 2024-11-14T03:56:30,229 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-1399754620-172.17.0.2-1731556550838:blk_1073741889_1075 2024-11-14T03:56:30,229 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:46972 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T03:56:30,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1147634268_22 at /127.0.0.1:46972 [Receiving block BP-1399754620-172.17.0.2-1731556550838:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:44609:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46972 dst: /127.0.0.1:44609 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T03:56:30,230 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34633,DS-b1131380-b5ef-4ed4-b7ad-d832aa00a7cd,DISK] 2024-11-14T03:56:30,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741890_1076 (size=5153) 2024-11-14T03:56:30,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741890_1076 (size=5153) 2024-11-14T03:56:30,240 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/.tmp/ns/996b8277c6384863964e2e419bc0f9b3 2024-11-14T03:56:30,250 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T03:56:30,250 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T03:56:30,252 INFO [regionserver/6a989be77d39:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:56:30,267 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/.tmp/table/65e8f5a8f493438ead071feee16b59de is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731556553984/Put/seqid=0 2024-11-14T03:56:30,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741891_1077 (size=5424) 2024-11-14T03:56:30,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741891_1077 (size=5424) 2024-11-14T03:56:30,275 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/.tmp/table/65e8f5a8f493438ead071feee16b59de 2024-11-14T03:56:30,286 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/.tmp/info/75783a8839ce41beb2be0c97f3a125ef as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/info/75783a8839ce41beb2be0c97f3a125ef 2024-11-14T03:56:30,294 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/info/75783a8839ce41beb2be0c97f3a125ef, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T03:56:30,295 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/.tmp/ns/996b8277c6384863964e2e419bc0f9b3 as 
hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/ns/996b8277c6384863964e2e419bc0f9b3 2024-11-14T03:56:30,301 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/ns/996b8277c6384863964e2e419bc0f9b3, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T03:56:30,302 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/.tmp/table/65e8f5a8f493438ead071feee16b59de as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/table/65e8f5a8f493438ead071feee16b59de 2024-11-14T03:56:30,309 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/table/65e8f5a8f493438ead071feee16b59de, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T03:56:30,311 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 165ms, sequenceid=11, compaction requested=false 2024-11-14T03:56:30,325 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T03:56:30,326 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:56:30,326 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:56:30,326 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556590146Running coprocessor pre-close hooks at 1731556590146Disabling compacts and flushes for region at 1731556590146Disabling writes for close at 1731556590146Obtaining lock to block concurrent updates at 1731556590146Preparing flush snapshotting stores in 1588230740 at 1731556590146Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731556590147 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731556590160 (+13 ms)Flushing 1588230740/info: creating writer at 1731556590160Flushing 1588230740/info: appending metadata at 1731556590179 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731556590179Flushing 1588230740/ns: creating writer at 1731556590199 (+20 ms)Flushing 1588230740/ns: appending metadata at 1731556590221 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731556590222 (+1 ms)Flushing 1588230740/table: creating writer at 1731556590246 (+24 ms)Flushing 1588230740/table: appending metadata at 1731556590267 (+21 ms)Flushing 
1588230740/table: closing flushed file at 1731556590267Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@91779f0: reopening flushed file at 1731556590283 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66a05612: reopening flushed file at 1731556590294 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25b30546: reopening flushed file at 1731556590301 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 165ms, sequenceid=11, compaction requested=false at 1731556590311 (+10 ms)Writing region close event to WAL at 1731556590321 (+10 ms)Running coprocessor post-close hooks at 1731556590326 (+5 ms)Closed at 1731556590326 2024-11-14T03:56:30,326 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T03:56:30,346 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(976): stopping server 6a989be77d39,46431,1731556551881; all regions closed. 2024-11-14T03:56:30,346 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,347 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,347 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,347 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,347 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:30,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741886_1070 (size=825) 2024-11-14T03:56:30,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741886_1070 (size=825) 2024-11-14T03:56:30,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741861_1044 (size=13591) 2024-11-14T03:56:30,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T03:56:30,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:56:30,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T03:56:31,126 INFO [regionserver/6a989be77d39:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:56:31,131 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T03:56:31,131 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T03:56:31,420 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@53fc424a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44609, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=40141, infoSecurePort=0, ipcPort=33599, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer BP-1399754620-172.17.0.2-1731556550838:blk_1073741836_1012 to 127.0.0.1:34633 got 
java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:31,421 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@8ddaa9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44609, datanodeUuid=9afe9bd4-33a6-4808-b0e1-7de0b2745446, infoPort=40141, infoSecurePort=0, ipcPort=33599, storageInfo=lv=-57;cid=testClusterID;nsid=483071342;c=1731556550838):Failed to transfer BP-1399754620-172.17.0.2-1731556550838:blk_1073741832_1008 to 127.0.0.1:34633 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:32,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:56:32,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:56:32,918 INFO [master/6a989be77d39:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T03:56:32,918 INFO [master/6a989be77d39:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-14T03:56:33,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:56:33,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:56:33,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741838_1020 (size=3600) 2024-11-14T03:56:34,153 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 after 4002ms 2024-11-14T03:56:34,160 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta after 4002ms 2024-11-14T03:56:34,451 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2b6dac75 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1399754620-172.17.0.2-1731556550838:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:33561,null,null]) java.net.ConnectException: Call From 6a989be77d39/172.17.0.2 to localhost:42993 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T03:56:34,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741830_1064 (size=27318) 2024-11-14T03:56:35,151 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T03:56:35,155 DEBUG [RS:1;6a989be77d39:36173 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs 2024-11-14T03:56:35,155 INFO [RS:1;6a989be77d39:36173 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C36173%2C1731556553047:(num 1731556553260) 2024-11-14T03:56:35,155 DEBUG [RS:1;6a989be77d39:36173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:56:35,155 INFO [RS:1;6a989be77d39:36173 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:56:35,156 INFO [RS:1;6a989be77d39:36173 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:56:35,156 INFO [RS:1;6a989be77d39:36173 {}] hbase.ChoreService(370): Chore service for: regionserver/6a989be77d39:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T03:56:35,156 INFO [RS:1;6a989be77d39:36173 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T03:56:35,156 INFO [RS:1;6a989be77d39:36173 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T03:56:35,156 INFO [RS:1;6a989be77d39:36173 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
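The WAL-Shutdown-0 error in the entry above names the config key "hbase.wal.fshlog.wait.on.shutdown.seconds" as the knob for how long the close of the async writer is awaited (5 seconds here). As a minimal sketch only, assuming hbase-common on the classpath and a test that builds its own configuration, the wait could be raised programmatically; the key is quoted verbatim from the log, while the 15-second value is an arbitrary illustrative choice, not a recommendation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitConfig {
        // Builds a configuration that gives the FSHLog writer more time to close
        // on shutdown than the 5 seconds reported in the log entry above.
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Key copied from the AbstractFSWAL error message; 15 s is only an example value.
            conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 15);
            return conf;
        }

        public static void main(String[] args) {
            System.out.println(build().get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
        }
    }

Whether a longer wait actually helps in this run depends on the underlying filesystem state that the error message asks to be checked; the sketch only shows where the setting lives.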
2024-11-14T03:56:35,156 INFO [RS:1;6a989be77d39:36173 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:56:35,156 INFO [RS:1;6a989be77d39:36173 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36173 2024-11-14T03:56:35,157 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T03:56:35,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36173-0x100ef6320fb0002, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6a989be77d39,36173,1731556553047 2024-11-14T03:56:35,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:56:35,159 INFO [RS:1;6a989be77d39:36173 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:56:35,165 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6a989be77d39,36173,1731556553047] 2024-11-14T03:56:35,166 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6a989be77d39,36173,1731556553047 already deleted, retry=false 2024-11-14T03:56:35,166 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6a989be77d39,36173,1731556553047 expired; onlineServers=1 2024-11-14T03:56:35,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36173-0x100ef6320fb0002, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:56:35,262 INFO [RS:1;6a989be77d39:36173 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:56:35,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36173-0x100ef6320fb0002, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:56:35,262 INFO [RS:1;6a989be77d39:36173 {}] regionserver.HRegionServer(1031): Exiting; stopping=6a989be77d39,36173,1731556553047; zookeeper connection closed. 2024-11-14T03:56:35,262 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@616cf69e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@616cf69e 2024-11-14T03:56:35,347 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T03:56:35,352 DEBUG [RS:0;6a989be77d39:46431 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs 2024-11-14T03:56:35,352 INFO [RS:0;6a989be77d39:46431 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C46431%2C1731556551881.meta:.meta(num 1731556590147) 2024-11-14T03:56:35,352 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,353 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,353 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,353 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,353 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741881_1065 (size=16308) 2024-11-14T03:56:35,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741881_1065 (size=16308) 2024-11-14T03:56:35,364 DEBUG [RS:0;6a989be77d39:46431 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/oldWALs 2024-11-14T03:56:35,364 INFO [RS:0;6a989be77d39:46431 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C46431%2C1731556551881:(num 1731556589680) 2024-11-14T03:56:35,364 DEBUG [RS:0;6a989be77d39:46431 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:56:35,364 INFO [RS:0;6a989be77d39:46431 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:56:35,364 INFO [RS:0;6a989be77d39:46431 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:56:35,364 INFO [RS:0;6a989be77d39:46431 {}] hbase.ChoreService(370): Chore service for: regionserver/6a989be77d39:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T03:56:35,364 INFO [RS:0;6a989be77d39:46431 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:56:35,364 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T03:56:35,364 INFO [RS:0;6a989be77d39:46431 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46431 2024-11-14T03:56:35,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:56:35,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6a989be77d39,46431,1731556551881 2024-11-14T03:56:35,367 INFO [RS:0;6a989be77d39:46431 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:56:35,369 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6a989be77d39,46431,1731556551881] 2024-11-14T03:56:35,370 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6a989be77d39,46431,1731556551881 already deleted, retry=false 2024-11-14T03:56:35,370 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6a989be77d39,46431,1731556551881 expired; onlineServers=0 2024-11-14T03:56:35,370 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6a989be77d39,44171,1731556551778' ***** 2024-11-14T03:56:35,370 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T03:56:35,370 INFO [M:0;6a989be77d39:44171 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:56:35,371 INFO [M:0;6a989be77d39:44171 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:56:35,371 DEBUG [M:0;6a989be77d39:44171 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T03:56:35,371 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
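The repeated HBase-Metrics2-1 warnings above ("Cannot invoke \"java.util.Map.values()\" because \"this.executors\" is null") come from metric collection racing with datanode shutdown, which has already cleared the executors map. The following standalone sketch reproduces that race shape and the usual defensive fix (snapshot the reference once, then null-check it); the class and field names are invented for illustration and are not the FsDatasetImpl code:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutorService;

    public class MetricsDuringShutdown {
        // Mirrors the shape of the race seen in the log: a map that shutdown()
        // sets to null while a metrics thread may still be reading it.
        private volatile Map<String, ExecutorService> executors = new ConcurrentHashMap<>();

        public void shutdown() {
            executors = null; // after this, naive readers hit a NullPointerException
        }

        // Defensive variant: take one snapshot of the reference and null-check it,
        // so a concurrent shutdown() cannot break the read mid-way.
        public int countExecutors() {
            Map<String, ExecutorService> snapshot = executors;
            if (snapshot == null) {
                return 0; // shutting down; report nothing instead of throwing
            }
            return snapshot.values().size();
        }

        public static void main(String[] args) {
            MetricsDuringShutdown m = new MetricsDuringShutdown();
            m.shutdown();
            System.out.println(m.countExecutors()); // prints 0 rather than throwing NPE
        }
    }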
2024-11-14T03:56:35,371 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556552155 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556552155,5,FailOnTimeoutGroup] 2024-11-14T03:56:35,371 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556552155 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556552155,5,FailOnTimeoutGroup] 2024-11-14T03:56:35,371 DEBUG [M:0;6a989be77d39:44171 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T03:56:35,371 INFO [M:0;6a989be77d39:44171 {}] hbase.ChoreService(370): Chore service for: master/6a989be77d39:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T03:56:35,372 INFO [M:0;6a989be77d39:44171 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:56:35,372 DEBUG [M:0;6a989be77d39:44171 {}] master.HMaster(1795): Stopping service threads 2024-11-14T03:56:35,372 INFO [M:0;6a989be77d39:44171 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T03:56:35,372 INFO [M:0;6a989be77d39:44171 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:56:35,372 INFO [M:0;6a989be77d39:44171 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T03:56:35,372 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T03:56:35,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T03:56:35,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:35,373 DEBUG [M:0;6a989be77d39:44171 {}] zookeeper.ZKUtil(347): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T03:56:35,373 WARN [M:0;6a989be77d39:44171 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T03:56:35,374 INFO [M:0;6a989be77d39:44171 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/.lastflushedseqids 2024-11-14T03:56:35,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741892_1078 (size=130) 2024-11-14T03:56:35,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741892_1078 (size=130) 2024-11-14T03:56:35,385 INFO [M:0;6a989be77d39:44171 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T03:56:35,385 INFO [M:0;6a989be77d39:44171 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T03:56:35,386 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:56:35,386 INFO [M:0;6a989be77d39:44171 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:56:35,386 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:56:35,386 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:56:35,386 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:56:35,386 INFO [M:0;6a989be77d39:44171 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-14T03:56:35,405 DEBUG [M:0;6a989be77d39:44171 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb9351ac50654d69b615c7cee4f5cea9 is 82, key is hbase:meta,,1/info:regioninfo/1731556552875/Put/seqid=0 2024-11-14T03:56:35,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741893_1079 (size=5672) 2024-11-14T03:56:35,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741893_1079 (size=5672) 2024-11-14T03:56:35,416 INFO [M:0;6a989be77d39:44171 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb9351ac50654d69b615c7cee4f5cea9 2024-11-14T03:56:35,453 DEBUG [M:0;6a989be77d39:44171 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bec25e32d88740db947ec23e6d2cb436 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731556553992/Put/seqid=0 2024-11-14T03:56:35,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741894_1080 (size=6256) 2024-11-14T03:56:35,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741894_1080 (size=6256) 2024-11-14T03:56:35,460 INFO [M:0;6a989be77d39:44171 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bec25e32d88740db947ec23e6d2cb436 2024-11-14T03:56:35,468 INFO [M:0;6a989be77d39:44171 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bec25e32d88740db947ec23e6d2cb436 2024-11-14T03:56:35,469 INFO [RS:0;6a989be77d39:46431 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:56:35,469 INFO [RS:0;6a989be77d39:46431 {}] regionserver.HRegionServer(1031): Exiting; stopping=6a989be77d39,46431,1731556551881; zookeeper connection 
closed. 2024-11-14T03:56:35,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:56:35,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46431-0x100ef6320fb0001, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:56:35,472 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@70dcedc6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@70dcedc6 2024-11-14T03:56:35,472 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-14T03:56:35,486 DEBUG [M:0;6a989be77d39:44171 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1f5f49b54bbc446fb7faf9307cd686c5 is 69, key is 6a989be77d39,36173,1731556553047/rs:state/1731556553100/Put/seqid=0 2024-11-14T03:56:35,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741895_1081 (size=5224) 2024-11-14T03:56:35,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741895_1081 (size=5224) 2024-11-14T03:56:35,497 INFO [M:0;6a989be77d39:44171 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1f5f49b54bbc446fb7faf9307cd686c5 2024-11-14T03:56:35,528 DEBUG [M:0;6a989be77d39:44171 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/96971e2cbfe045d383f4a904d4acd41e is 52, key is load_balancer_on/state:d/1731556553030/Put/seqid=0 2024-11-14T03:56:35,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741896_1082 (size=5056) 2024-11-14T03:56:35,540 INFO [M:0;6a989be77d39:44171 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/96971e2cbfe045d383f4a904d4acd41e 2024-11-14T03:56:35,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741896_1082 (size=5056) 2024-11-14T03:56:35,546 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb9351ac50654d69b615c7cee4f5cea9 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb9351ac50654d69b615c7cee4f5cea9 2024-11-14T03:56:35,554 INFO [M:0;6a989be77d39:44171 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb9351ac50654d69b615c7cee4f5cea9, entries=8, sequenceid=60, filesize=5.5 K 2024-11-14T03:56:35,555 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bec25e32d88740db947ec23e6d2cb436 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bec25e32d88740db947ec23e6d2cb436 2024-11-14T03:56:35,566 INFO [M:0;6a989be77d39:44171 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bec25e32d88740db947ec23e6d2cb436 2024-11-14T03:56:35,566 INFO [M:0;6a989be77d39:44171 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bec25e32d88740db947ec23e6d2cb436, entries=6, sequenceid=60, filesize=6.1 K 2024-11-14T03:56:35,574 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1f5f49b54bbc446fb7faf9307cd686c5 as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1f5f49b54bbc446fb7faf9307cd686c5 2024-11-14T03:56:35,590 INFO [M:0;6a989be77d39:44171 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1f5f49b54bbc446fb7faf9307cd686c5, entries=2, sequenceid=60, filesize=5.1 K 2024-11-14T03:56:35,594 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/96971e2cbfe045d383f4a904d4acd41e as hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/96971e2cbfe045d383f4a904d4acd41e 2024-11-14T03:56:35,608 INFO [M:0;6a989be77d39:44171 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/96971e2cbfe045d383f4a904d4acd41e, entries=1, sequenceid=60, filesize=4.9 K 2024-11-14T03:56:35,609 INFO [M:0;6a989be77d39:44171 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 223ms, sequenceid=60, compaction requested=false 2024-11-14T03:56:35,611 INFO [M:0;6a989be77d39:44171 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
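The master-store flush above writes each column family (info, proc, rs, state) under a .tmp directory and then "commits" the file into the store directory by rename. The sketch below shows that write-to-temp-then-move pattern generically on a local filesystem using java.nio.file; it only illustrates the idea and is not HRegionFileSystem's HDFS implementation (HDFS uses its own rename, and ATOMIC_MOVE support varies by filesystem):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.*;

    public class TmpThenCommit {
        // Writes the payload to <storeDir>/.tmp/<name> first, then moves it into place,
        // so readers never observe a half-written store file.
        static Path flushAndCommit(Path storeDir, String name, byte[] payload) throws IOException {
            Path tmpDir = storeDir.resolve(".tmp");
            Files.createDirectories(tmpDir);
            Path tmpFile = tmpDir.resolve(name);
            Files.write(tmpFile, payload);
            Path finalFile = storeDir.resolve(name);
            return Files.move(tmpFile, finalFile, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path store = Files.createTempDirectory("store");
            Path committed = flushAndCommit(store, "cb9351ac50654d69b615c7cee4f5cea9",
                    "flushed cells".getBytes(StandardCharsets.UTF_8));
            System.out.println("committed: " + committed);
        }
    }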
2024-11-14T03:56:35,611 DEBUG [M:0;6a989be77d39:44171 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556595385Disabling compacts and flushes for region at 1731556595385Disabling writes for close at 1731556595386 (+1 ms)Obtaining lock to block concurrent updates at 1731556595386Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731556595386Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731556595386Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731556595387 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731556595387Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731556595405 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731556595405Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731556595424 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731556595452 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731556595452Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731556595468 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731556595486 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731556595486Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731556595504 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731556595527 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731556595527Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b54d03: reopening flushed file at 1731556595546 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@872ea22: reopening flushed file at 1731556595555 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20c60d4f: reopening flushed file at 1731556595566 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e931f91: reopening flushed file at 1731556595590 (+24 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 223ms, sequenceid=60, compaction requested=false at 1731556595609 (+19 ms)Writing region close event to WAL at 1731556595611 (+2 ms)Closed at 1731556595611 2024-11-14T03:56:35,612 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,612 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,612 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,612 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,613 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:35,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37291 is added to blk_1073741880_1063 (size=1045) 2024-11-14T03:56:35,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44609 is added to blk_1073741880_1063 (size=1045) 2024-11-14T03:56:35,616 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
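The "Region close journal" above (and the meta close journal earlier in this section) records each step with an absolute millisecond timestamp and a "(+N ms)" delta from the previous step, concatenated without separators. A tiny sketch of how such a journal can be produced; the class name and step strings are invented for illustration:

    import java.util.ArrayList;
    import java.util.List;

    public class CloseJournal {
        private final List<String> steps = new ArrayList<>();
        private long last = -1;

        // Records a step with its absolute timestamp and the delta to the previous step,
        // matching the "(+N ms)" style seen in the close journals above.
        public void record(String step) {
            long now = System.currentTimeMillis();
            String delta = (last < 0) ? "" : " (+" + (now - last) + " ms)";
            steps.add(step + " at " + now + delta);
            last = now;
        }

        @Override
        public String toString() {
            return String.join("", steps); // the log concatenates steps without separators
        }

        public static void main(String[] args) throws InterruptedException {
            CloseJournal j = new CloseJournal();
            j.record("Waiting for close lock");
            Thread.sleep(10);
            j.record("Disabling writes for close");
            j.record("Writing region close event to WAL");
            System.out.println(j);
        }
    }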
2024-11-14T03:56:35,616 INFO [M:0;6a989be77d39:44171 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T03:56:35,616 INFO [M:0;6a989be77d39:44171 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44171 2024-11-14T03:56:35,617 INFO [M:0;6a989be77d39:44171 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:56:35,713 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:56:35,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:56:35,720 INFO [M:0;6a989be77d39:44171 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:56:35,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44171-0x100ef6320fb0000, quorum=127.0.0.1:64949, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:56:35,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@95c65e3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:35,723 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2aaad1eb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:35,723 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:35,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32cdb76b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:35,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21cc8d76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:35,724 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@51d9956d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1399754620-172.17.0.2-1731556550838:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:33561,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:42993 , LocalHost:localPort 6a989be77d39/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-14T03:56:35,726 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1399754620-172.17.0.2-1731556550838 (Datanode Uuid 9afe9bd4-33a6-4808-b0e1-7de0b2745446) service to localhost/127.0.0.1:43619 2024-11-14T03:56:35,727 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data4/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:35,729 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data3/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:35,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,730 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@51d9956d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33561,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1399754620-172.17.0.2-1731556550838 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:35,730 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@51d9956d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44609,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1399754620-172.17.0.2-1731556550838 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:35,731 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@51d9956d {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33561,null,null], DatanodeInfoWithStorage[127.0.0.1:44609,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1399754620-172.17.0.2-1731556550838:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:33561,null,null], DatanodeInfoWithStorage[127.0.0.1:44609,null,null]] 2024-11-14T03:56:35,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:35,767 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:56:35,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a9d9bc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:35,770 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47d01054{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:35,770 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:35,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@325d7ee2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:35,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c5f7ee0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:35,772 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
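The BlockRecoveryWorker failures in the entries above were cut short by the shutdown interrupt while waiting inside RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS), which is why they surface as InterruptedIOException rather than the plain ConnectException seen earlier. A compact sketch of that behaviour, assuming a fixed retry count and fixed sleep, with an interrupt converted to InterruptedIOException and the interrupt flag restored; it models the policy described in the log, not Hadoop's actual RetryPolicy classes:

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.Callable;

    public class FixedSleepRetry {
        // Retries the action up to maxRetries extra times, sleeping sleepMillis between
        // attempts; an interrupt during the sleep is reported as InterruptedIOException.
        static <T> T call(Callable<T> action, int maxRetries, long sleepMillis) throws IOException {
            IOException last = new IOException("no attempts made");
            for (int attempt = 0; attempt <= maxRetries; attempt++) {
                try {
                    return action.call();
                } catch (IOException e) {
                    last = e;
                    try {
                        Thread.sleep(sleepMillis);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt(); // keep the interrupt visible to callers
                        InterruptedIOException iioe =
                            new InterruptedIOException("Interrupted while retrying: attempt " + attempt);
                        iioe.initCause(ie);
                        throw iioe;
                    }
                } catch (Exception e) {
                    throw new IOException(e);
                }
            }
            throw last;
        }

        public static void main(String[] args) throws IOException {
            // The action always fails; with the thread already interrupted,
            // the first sleep throws and the retry loop gives up early.
            Thread.currentThread().interrupt();
            try {
                call(() -> { throw new IOException("connection refused"); }, 10, 1000);
            } catch (InterruptedIOException e) {
                System.out.println("gave up early: " + e.getMessage());
            }
        }
    }

During an orderly shutdown this early exit is the desired outcome: the recovery work is abandoned quickly instead of sleeping through the remaining retries.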
2024-11-14T03:56:35,772 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:56:35,772 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:56:35,772 WARN [BP-1399754620-172.17.0.2-1731556550838 heartbeating to localhost/127.0.0.1:43619 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1399754620-172.17.0.2-1731556550838 (Datanode Uuid 39a6d0c1-203e-4807-bb4e-95c3fb4af9c2) service to localhost/127.0.0.1:43619 2024-11-14T03:56:35,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data9/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:35,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/cluster_c9c29ee8-6e39-da03-c7e4-b9f600a9909a/data/data10/current/BP-1399754620-172.17.0.2-1731556550838 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:35,774 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:56:35,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d607c51{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:56:35,780 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ae0a20f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:35,780 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:35,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2fd186ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:35,781 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73b9709e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:35,792 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T03:56:35,835 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T03:56:35,846 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 80) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:43619 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43619 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43619 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:35605 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:43619 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43619 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43619 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43619 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43619 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43619 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43619 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43619 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$895/0x00007f7d80bf7868.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35605 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$895/0x00007f7d80bf7868.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=405 (was 395) - SystemLoadAverage LEAK? -, ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=5768 (was 5396) - AvailableMemoryMB LEAK? - 2024-11-14T03:56:35,855 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=405, ProcessCount=12, AvailableMemoryMB=5768 2024-11-14T03:56:35,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T03:56:35,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.log.dir so I do NOT create it in target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d 2024-11-14T03:56:35,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6072b488-3b5b-7177-30b6-2ff81fa9cf67/hadoop.tmp.dir so I do NOT create it in target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d 2024-11-14T03:56:35,856 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd, deleteOnExit=true 2024-11-14T03:56:35,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T03:56:35,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/test.cache.data in system properties and HBase conf 2024-11-14T03:56:35,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T03:56:35,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir in system properties and HBase conf 2024-11-14T03:56:35,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T03:56:35,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T03:56:35,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T03:56:35,857 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T03:56:35,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:56:35,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:56:35,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T03:56:35,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:56:35,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T03:56:35,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T03:56:35,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:56:35,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:56:35,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T03:56:35,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/nfs.dump.dir in system properties and HBase conf 2024-11-14T03:56:35,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/java.io.tmpdir in system properties and HBase conf 2024-11-14T03:56:35,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:56:35,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T03:56:35,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T03:56:35,876 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:56:35,962 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:35,971 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:35,978 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:35,978 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:35,978 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:56:35,979 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
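The StartMiniClusterOption{numMasters=1, numRegionServers=1, ..., numDataNodes=2, ..., numZkServers=1, ...} line and the long run of "Setting ... in system properties and HBase conf" messages above are what HBaseTestingUtil prints while preparing a fresh DFS + ZooKeeper + HBase mini cluster for the test. A test that produces this output would look roughly like the sketch below; the builder method names are inferred from the option's toString shown in the log and may differ between HBase versions, so treat them as assumptions rather than a definitive API reference.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Rough sketch only: bring up the same shape of cluster the log shows
// (1 master, 1 region server, 2 data nodes, 1 ZooKeeper server).
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // emits the "Starting up minicluster" lines above
    try {
      // ... exercise WAL rolling, as TestLogRolling#testLogRollOnPipelineRestart does ...
    } finally {
      util.shutdownMiniCluster();    // tear the mini cluster down again
    }
  }
}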
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:35,979 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24710539{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:35,980 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14ea830a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:36,106 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1698d36f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/java.io.tmpdir/jetty-localhost-38267-hadoop-hdfs-3_4_1-tests_jar-_-any-337587393361711357/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:56:36,107 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13fdd007{HTTP/1.1, (http/1.1)}{localhost:38267} 2024-11-14T03:56:36,107 INFO [Time-limited test {}] server.Server(415): Started @152119ms 2024-11-14T03:56:36,121 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:56:36,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:36,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:36,191 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
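Both "Failed invocation" warnings above report a java.lang.reflect.InvocationTargetException whose real problem is the nested IOException: Filesystem closed. The stack traces show RecoverLeaseFSUtils.isFileClosed going through Method.invoke, so the underlying HDFS failure only surfaces as the exception's cause. A small self-contained illustration of that wrapping, using a hypothetical stand-in class rather than the HBase code:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Illustration only: a stand-in object whose method fails the same way the
// DFS client does once the filesystem has already been shut down.
public class ReflectionCauseDemo {
  public static class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(new ClosedFs(), "/some/wal/file");   // placeholder path
    } catch (InvocationTargetException e) {
      // The reflective call wraps the real failure; the log above shows the same
      // pattern: InvocationTargetException caused by "Filesystem closed".
      System.out.println("real cause: " + e.getCause());
    }
  }
}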
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:36,195 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:36,195 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:36,195 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:36,195 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:56:36,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fd8c23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:36,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@dd504c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:36,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58bf7935{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/java.io.tmpdir/jetty-localhost-45525-hadoop-hdfs-3_4_1-tests_jar-_-any-16132651276565152968/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:36,326 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74ea1d44{HTTP/1.1, (http/1.1)}{localhost:45525} 2024-11-14T03:56:36,327 INFO [Time-limited test {}] server.Server(415): Started @152339ms 2024-11-14T03:56:36,328 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:56:36,372 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:36,376 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:36,377 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:36,377 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:36,377 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:56:36,378 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232fa1ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:36,379 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e63449e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:36,428 WARN [Thread-1180 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data1/current/BP-75461122-172.17.0.2-1731556595898/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:36,429 WARN [Thread-1181 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data2/current/BP-75461122-172.17.0.2-1731556595898/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:36,454 WARN [Thread-1159 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:56:36,457 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16cc402c21434d46 with lease ID 0xfd5c1d0ce6298f27: Processing first storage report for DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd from datanode DatanodeRegistration(127.0.0.1:36469, datanodeUuid=a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4, infoPort=34577, infoSecurePort=0, ipcPort=40401, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898) 2024-11-14T03:56:36,457 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16cc402c21434d46 with lease ID 0xfd5c1d0ce6298f27: from storage DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd node DatanodeRegistration(127.0.0.1:36469, datanodeUuid=a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4, infoPort=34577, infoSecurePort=0, ipcPort=40401, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:36,457 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16cc402c21434d46 with lease ID 0xfd5c1d0ce6298f27: Processing first storage report for DS-5041434b-5e0d-4397-b572-78266d889856 from datanode DatanodeRegistration(127.0.0.1:36469, datanodeUuid=a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4, infoPort=34577, infoSecurePort=0, ipcPort=40401, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898) 2024-11-14T03:56:36,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16cc402c21434d46 with lease ID 0xfd5c1d0ce6298f27: from storage DS-5041434b-5e0d-4397-b572-78266d889856 node DatanodeRegistration(127.0.0.1:36469, datanodeUuid=a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4, infoPort=34577, infoSecurePort=0, ipcPort=40401, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:36,504 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15e7f484{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/java.io.tmpdir/jetty-localhost-42975-hadoop-hdfs-3_4_1-tests_jar-_-any-1107770916638421383/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:36,505 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@586e8021{HTTP/1.1, (http/1.1)}{localhost:42975} 2024-11-14T03:56:36,505 INFO [Time-limited test {}] server.Server(415): Started @152517ms 2024-11-14T03:56:36,506 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
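The DirectoryScanner warning just above ("dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1") describes a simple sanity check: more than 1000 ms of scanning work per second is impossible, so the datanode falls back to -1 (throttling disabled). An illustrative sketch of that check, with the fallback logic inferred from the log message rather than taken from the Hadoop source:

// Hypothetical illustration of the warning above: values over 1000 ms/sec are
// rejected and replaced with -1, i.e. "no throttling".
public class ThrottleFallbackSketch {
  static int effectiveThrottleMsPerSec(int configured) {
    return configured > 1000 ? -1 : configured;
  }

  public static void main(String[] args) {
    System.out.println(effectiveThrottleMsPerSec(1500)); // prints -1, matching "Assuming default value of -1"
  }
}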
2024-11-14T03:56:36,622 WARN [Thread-1207 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data4/current/BP-75461122-172.17.0.2-1731556595898/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:36,622 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data3/current/BP-75461122-172.17.0.2-1731556595898/current, will proceed with Du for space computation calculation, 2024-11-14T03:56:36,642 WARN [Thread-1195 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T03:56:36,645 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb9ba186c172a337 with lease ID 0xfd5c1d0ce6298f28: Processing first storage report for DS-ecfb4e98-8651-46f9-8733-4cd51229e903 from datanode DatanodeRegistration(127.0.0.1:33835, datanodeUuid=713432d8-9621-4318-9e01-0877af36c355, infoPort=42347, infoSecurePort=0, ipcPort=36465, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898) 2024-11-14T03:56:36,645 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb9ba186c172a337 with lease ID 0xfd5c1d0ce6298f28: from storage DS-ecfb4e98-8651-46f9-8733-4cd51229e903 node DatanodeRegistration(127.0.0.1:33835, datanodeUuid=713432d8-9621-4318-9e01-0877af36c355, infoPort=42347, infoSecurePort=0, ipcPort=36465, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:36,645 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb9ba186c172a337 with lease ID 0xfd5c1d0ce6298f28: Processing first storage report for DS-1f88b0bb-0869-477f-b3fd-b15c04318277 from datanode DatanodeRegistration(127.0.0.1:33835, datanodeUuid=713432d8-9621-4318-9e01-0877af36c355, infoPort=42347, infoSecurePort=0, ipcPort=36465, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898) 2024-11-14T03:56:36,645 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb9ba186c172a337 with lease ID 0xfd5c1d0ce6298f28: from storage DS-1f88b0bb-0869-477f-b3fd-b15c04318277 node DatanodeRegistration(127.0.0.1:33835, datanodeUuid=713432d8-9621-4318-9e01-0877af36c355, infoPort=42347, infoSecurePort=0, ipcPort=36465, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:36,741 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d 2024-11-14T03:56:36,744 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/zookeeper_0, clientPort=50409, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T03:56:36,745 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50409 2024-11-14T03:56:36,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:56:36,747 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:56:36,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33835 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:56:36,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36469 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:56:36,760 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9 with version=8 2024-11-14T03:56:36,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/hbase-staging 2024-11-14T03:56:36,762 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:56:36,762 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:56:36,762 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:56:36,763 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:56:36,763 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:56:36,763 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:56:36,763 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T03:56:36,763 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:56:36,763 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39519 2024-11-14T03:56:36,765 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39519 connecting to ZooKeeper ensemble=127.0.0.1:50409 2024-11-14T03:56:36,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395190x0, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:56:36,776 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39519-0x100ef63d0b20000 connected 2024-11-14T03:56:36,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:56:36,793 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:56:36,795 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:56:36,795 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9, hbase.cluster.distributed=false 2024-11-14T03:56:36,797 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:56:36,797 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39519 2024-11-14T03:56:36,797 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39519 2024-11-14T03:56:36,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39519 2024-11-14T03:56:36,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39519 2024-11-14T03:56:36,798 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39519 2024-11-14T03:56:36,816 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:56:36,816 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:56:36,816 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:56:36,816 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:56:36,816 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:56:36,816 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:56:36,816 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T03:56:36,817 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:56:36,817 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32967 2024-11-14T03:56:36,818 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32967 connecting to ZooKeeper ensemble=127.0.0.1:50409 2024-11-14T03:56:36,819 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:56:36,821 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:56:36,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329670x0, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:56:36,826 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329670x0, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:56:36,827 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T03:56:36,831 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32967-0x100ef63d0b20001 connected 2024-11-14T03:56:36,834 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T03:56:36,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T03:56:36,836 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:56:36,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32967 2024-11-14T03:56:36,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32967 2024-11-14T03:56:36,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32967 2024-11-14T03:56:36,842 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32967 2024-11-14T03:56:36,843 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32967 2024-11-14T03:56:36,856 
DEBUG [M:0;6a989be77d39:39519 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6a989be77d39:39519 2024-11-14T03:56:36,856 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6a989be77d39,39519,1731556596762 2024-11-14T03:56:36,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:56:36,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:56:36,860 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6a989be77d39,39519,1731556596762 2024-11-14T03:56:36,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:36,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T03:56:36,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:36,862 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T03:56:36,863 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6a989be77d39,39519,1731556596762 from backup master directory 2024-11-14T03:56:36,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6a989be77d39,39519,1731556596762 2024-11-14T03:56:36,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:56:36,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:56:36,864 WARN [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
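The entries above and below trace HBase's ZooKeeper-based master election: the process first registers an ephemeral znode under /hbase/backup-masters, then claims /hbase/master and deletes its backup entry once it wins. The sketch below shows only that bare pattern with the stock ZooKeeper client; it is illustrative, not ActiveMasterManager, and it assumes the /hbase parent znodes already exist. The quorum address and server name are copied from the log.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

// Conceptual sketch of the backup-master / active-master znode dance seen above.
public class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:50409";                       // from the log
    String serverName = "6a989be77d39,39519,1731556596762";  // host,port,startcode
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { /* watcher callback */ });

    // 1. Advertise as a backup master; ephemeral, so it disappears if this JVM dies.
    zk.create("/hbase/backup-masters/" + serverName, new byte[0],
        Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    try {
      // 2. Try to become active by creating /hbase/master.
      zk.create("/hbase/master", serverName.getBytes(StandardCharsets.UTF_8),
          Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // 3. Won the race: remove the backup-master entry (-1 = any version).
      zk.delete("/hbase/backup-masters/" + serverName, -1);
    } catch (KeeperException.NodeExistsException alreadyActive) {
      // Lost the race: stay a backup and set a watch on the active master's znode.
      zk.exists("/hbase/master", true);
    }
  }
}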
2024-11-14T03:56:36,865 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6a989be77d39,39519,1731556596762 2024-11-14T03:56:36,870 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/hbase.id] with ID: a8bf9cd1-1a09-4ce7-8de8-c8c650a1e91c 2024-11-14T03:56:36,870 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/.tmp/hbase.id 2024-11-14T03:56:36,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33835 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:56:36,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36469 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:56:36,878 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/.tmp/hbase.id]:[hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/hbase.id] 2024-11-14T03:56:36,895 INFO [master/6a989be77d39:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:56:36,895 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T03:56:36,897 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
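The cluster ID entries above follow the usual HDFS publish pattern: write the file under a temporary path, then rename it to its final name so readers never observe a partially written hbase.id. Below is a minimal sketch with the standard Hadoop FileSystem API; the namenode URI and paths are placeholders, and only the ID string is taken from the log.

import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// "Write to .tmp, then rename into place", as described by the FSUtils entries above.
public class ClusterIdFileSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020/"), conf); // placeholder URI

    Path tmp = new Path("/hbase-root/.tmp/hbase.id");   // placeholder paths
    Path target = new Path("/hbase-root/hbase.id");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("a8bf9cd1-1a09-4ce7-8de8-c8c650a1e91c".getBytes(StandardCharsets.UTF_8));
    }
    // Rename is the publish step: readers only ever see a complete file at the target path.
    if (!fs.rename(tmp, target)) {
      throw new IOException("rename failed: " + tmp + " -> " + target);
    }
  }
}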
2024-11-14T03:56:36,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:36,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:36,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36469 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:56:36,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33835 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:56:36,908 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:56:36,908 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T03:56:36,909 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:56:36,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36469 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:56:36,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33835 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:56:37,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:37,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:37,319 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store 2024-11-14T03:56:37,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36469 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:56:37,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33835 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:56:37,328 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:56:37,328 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:56:37,328 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
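The two Close-WAL-Writer-0 warnings above end in "java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" because RecoverLeaseFSUtils invokes isFileClosed reflectively, and reflection wraps whatever the target method throws; the real error is only reachable through getCause(). A tiny self-contained demonstration of that wrapping (plain java.lang.reflect, not HBase code):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Shows why the wrapper's message is null while the useful detail sits in the cause.
public class ReflectionWrappingDemo {
  public static boolean isFileClosed(String path) throws IOException {
    throw new IOException("Filesystem closed");   // stand-in for the DFSClient check
  }

  public static void main(String[] args) throws Exception {
    Method m = ReflectionWrappingDemo.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(null, "/some/wal/file");
    } catch (InvocationTargetException e) {
      System.out.println("wrapper message: " + e.getMessage()); // null
      System.out.println("cause: " + e.getCause());             // java.io.IOException: Filesystem closed
    }
  }
}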
2024-11-14T03:56:37,329 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:56:37,329 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:56:37,329 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:56:37,329 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:56:37,329 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556597328Disabling compacts and flushes for region at 1731556597328Disabling writes for close at 1731556597329 (+1 ms)Writing region close event to WAL at 1731556597329Closed at 1731556597329 2024-11-14T03:56:37,330 WARN [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/.initializing 2024-11-14T03:56:37,330 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762 2024-11-14T03:56:37,333 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C39519%2C1731556596762, suffix=, logDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762, archiveDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/oldWALs, maxLogs=10 2024-11-14T03:56:37,333 INFO [master/6a989be77d39:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C39519%2C1731556596762.1731556597333 2024-11-14T03:56:37,362 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556597333 2024-11-14T03:56:37,363 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34577:34577),(127.0.0.1/127.0.0.1:42347:42347)] 2024-11-14T03:56:37,372 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:56:37,372 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:56:37,372 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,372 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] 
regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T03:56:37,378 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:37,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:56:37,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T03:56:37,380 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:37,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:56:37,381 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T03:56:37,383 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:37,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:56:37,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T03:56:37,384 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:37,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:56:37,385 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,386 DEBUG 
[master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,386 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,388 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,388 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,388 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T03:56:37,390 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:56:37,392 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:56:37,392 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710543, jitterRate=-0.09649848937988281}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T03:56:37,393 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731556597373Initializing all the Stores at 1731556597375 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556597375Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556597375Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556597375Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556597375Cleaning up temporary data from old regions at 1731556597388 (+13 ms)Region opened successfully at 1731556597393 (+5 ms) 2024-11-14T03:56:37,393 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T03:56:37,397 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4782baa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:56:37,398 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T03:56:37,398 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T03:56:37,398 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T03:56:37,398 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T03:56:37,399 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T03:56:37,399 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T03:56:37,399 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T03:56:37,401 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
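The flush-policy numbers above are internally consistent: master:store has four column families (info, proc, rs, state), the region flush size is 134217728 bytes (128 MB), and with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, FlushLargeStoresPolicy falls back to flushSize / numberOfFamilies = 33554432 bytes, i.e. the "(32.0 M)" and "flushSizeLowerBound=33554432" values logged above. A quick check:

// Arithmetic behind "flushSizeLowerBound=33554432" / "(32.0 M)" in the entries above.
public class FlushLowerBoundCheck {
  public static void main(String[] args) {
    long flushSize = 134_217_728L;            // 128 MB region flush size, from the log
    int families = 4;                          // master:store families: info, proc, rs, state
    long lowerBound = flushSize / families;    // 33_554_432 bytes
    System.out.println(lowerBound + " bytes = " + lowerBound / (1024 * 1024) + " MB"); // prints 32 MB
  }
}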
2024-11-14T03:56:37,402 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T03:56:37,404 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T03:56:37,404 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T03:56:37,405 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T03:56:37,406 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T03:56:37,406 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T03:56:37,411 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T03:56:37,412 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T03:56:37,413 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T03:56:37,415 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T03:56:37,417 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T03:56:37,418 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T03:56:37,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:56:37,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:56:37,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:37,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-14T03:56:37,421 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6a989be77d39,39519,1731556596762, sessionid=0x100ef63d0b20000, setting cluster-up flag (Was=false) 2024-11-14T03:56:37,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:37,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:37,430 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T03:56:37,431 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,39519,1731556596762 2024-11-14T03:56:37,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:37,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:37,439 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T03:56:37,440 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,39519,1731556596762 2024-11-14T03:56:37,442 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T03:56:37,443 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T03:56:37,444 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T03:56:37,444 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
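The StochasticLoadBalancer entry above lists its cost functions and the sum of their multipliers; conceptually the balancer scores each candidate assignment as a weighted sum of normalized per-function costs and keeps the mutations that lower the score. The toy sketch below illustrates only that weighted-sum idea; the function names and weights are invented for the example and are not HBase's defaults.

import java.util.Map;

// Toy weighted-cost scoring, the idea behind the CostFunctions=[...] entry above.
public class WeightedCostSketch {
  static double totalCost(Map<String, Double> normalizedCosts, Map<String, Double> multipliers) {
    double sum = 0.0;
    for (Map.Entry<String, Double> e : normalizedCosts.entrySet()) {
      // each cost is assumed normalized to [0, 1]; its multiplier sets how much it matters
      sum += multipliers.getOrDefault(e.getKey(), 0.0) * e.getValue();
    }
    return sum;
  }

  public static void main(String[] args) {
    Map<String, Double> costs = Map.of("regionCountSkew", 0.4, "moveCost", 0.1);
    Map<String, Double> weights = Map.of("regionCountSkew", 500.0, "moveCost", 7.0);
    System.out.println(totalCost(costs, weights)); // 200.7
  }
}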
2024-11-14T03:56:37,444 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6a989be77d39,39519,1731556596762 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T03:56:37,445 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:56:37,445 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:56:37,445 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:56:37,445 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:56:37,445 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6a989be77d39:0, corePoolSize=10, maxPoolSize=10 2024-11-14T03:56:37,445 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,445 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:56:37,446 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,446 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(746): ClusterId : a8bf9cd1-1a09-4ce7-8de8-c8c650a1e91c 2024-11-14T03:56:37,446 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T03:56:37,448 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T03:56:37,448 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T03:56:37,450 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T03:56:37,450 DEBUG [RS:0;6a989be77d39:32967 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@265e4e3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:56:37,451 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:56:37,451 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 
2024-11-14T03:56:37,453 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:37,453 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T03:56:37,453 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731556627453 2024-11-14T03:56:37,453 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T03:56:37,453 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T03:56:37,454 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T03:56:37,454 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T03:56:37,454 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T03:56:37,454 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T03:56:37,461 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
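The ChoreService entries above and below register periodic background chores (for example LogsCleaner with period=600000 ms, i.e. every 10 minutes). Stripped of HBase's ChoreService machinery, the scheduling shape is simply a fixed-rate task; the sketch below uses only java.util.concurrent and is a generic stand-in, not the real cleaner.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Generic stand-in for a ScheduledChore: run a cleanup task every 10 minutes.
public class ChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    Runnable logsCleaner = () -> System.out.println("scanning oldWALs for expired files...");
    scheduler.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS); // period from the log
  }
}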
2024-11-14T03:56:37,470 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T03:56:37,470 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T03:56:37,470 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T03:56:37,470 DEBUG [RS:0;6a989be77d39:32967 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6a989be77d39:32967 2024-11-14T03:56:37,470 INFO [RS:0;6a989be77d39:32967 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T03:56:37,470 INFO [RS:0;6a989be77d39:32967 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T03:56:37,470 DEBUG [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T03:56:37,471 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,39519,1731556596762 with port=32967, startcode=1731556596816 2024-11-14T03:56:37,471 DEBUG [RS:0;6a989be77d39:32967 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T03:56:37,481 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T03:56:37,481 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T03:56:37,484 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49705, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T03:56:37,485 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39519 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-14T03:56:37,485 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556597481,5,FailOnTimeoutGroup] 2024-11-14T03:56:37,485 DEBUG [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T03:56:37,486 WARN [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-14T03:56:37,490 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556597485,5,FailOnTimeoutGroup] 2024-11-14T03:56:37,490 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,490 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T03:56:37,490 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,491 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
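Just above, the region server's first reportForDuty is rejected with ServerNotRunningYetException because the master's RPC endpoint is listening before HMaster finishes initializing, so the server logs "sleeping 100 ms and then retrying". The generic shape of that retry loop is sketched below; Master.reportForDuty() is a hypothetical stand-in, and the real server's backoff policy is more involved than this.

// Generic retry-until-registered loop matching the "reportForDuty failed; sleeping
// 100 ms and then retrying" entries above. Master.reportForDuty() is a hypothetical stand-in.
public class ReportForDutyRetrySketch {
  interface Master { void reportForDuty() throws Exception; }

  static void registerWithRetry(Master master) throws InterruptedException {
    long sleepMs = 100;                       // matches the sleep reported in the log
    while (true) {
      try {
        master.reportForDuty();               // succeeds once the master is fully started
        return;
      } catch (Exception serverNotRunningYet) {
        System.out.println("reportForDuty failed; sleeping " + sleepMs + " ms and then retrying.");
        Thread.sleep(sleepMs);
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    int[] attempts = {0};
    registerWithRetry(() -> {
      if (++attempts[0] < 3) throw new IllegalStateException("Server is not running yet");
    });
    System.out.println("registered after " + attempts[0] + " attempts");
  }
}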
2024-11-14T03:56:37,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36469 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:56:37,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33835 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:56:37,493 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T03:56:37,494 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9 2024-11-14T03:56:37,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33835 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:56:37,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36469 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:56:37,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:56:37,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:56:37,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:56:37,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:37,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:56:37,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:56:37,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:56:37,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:37,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:56:37,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:56:37,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:56:37,529 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:37,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:56:37,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:56:37,530 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:56:37,530 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:37,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:56:37,531 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:56:37,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740 2024-11-14T03:56:37,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740 2024-11-14T03:56:37,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:56:37,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:56:37,536 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T03:56:37,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:56:37,541 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:56:37,542 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735244, jitterRate=-0.06508976221084595}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:56:37,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731556597519Initializing all the Stores at 1731556597521 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556597521Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556597521Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556597521Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556597521Cleaning up temporary data from old regions at 1731556597534 (+13 ms)Region opened successfully at 1731556597542 (+8 ms) 2024-11-14T03:56:37,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:56:37,543 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:56:37,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:56:37,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:56:37,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:56:37,543 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:56:37,544 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556597543Disabling compacts and flushes for region at 1731556597543Disabling writes for close at 1731556597543Writing region 
close event to WAL at 1731556597543Closed at 1731556597543 2024-11-14T03:56:37,545 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:56:37,545 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T03:56:37,545 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T03:56:37,547 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:56:37,549 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T03:56:37,587 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,39519,1731556596762 with port=32967, startcode=1731556596816 2024-11-14T03:56:37,588 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39519 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6a989be77d39,32967,1731556596816 2024-11-14T03:56:37,588 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39519 {}] master.ServerManager(517): Registering regionserver=6a989be77d39,32967,1731556596816 2024-11-14T03:56:37,590 DEBUG [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9 2024-11-14T03:56:37,590 DEBUG [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42105 2024-11-14T03:56:37,590 DEBUG [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T03:56:37,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:56:37,593 DEBUG [RS:0;6a989be77d39:32967 {}] zookeeper.ZKUtil(111): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6a989be77d39,32967,1731556596816 2024-11-14T03:56:37,593 WARN [RS:0;6a989be77d39:32967 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T03:56:37,593 INFO [RS:0;6a989be77d39:32967 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:56:37,593 DEBUG [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816 2024-11-14T03:56:37,594 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6a989be77d39,32967,1731556596816] 2024-11-14T03:56:37,600 INFO [RS:0;6a989be77d39:32967 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T03:56:37,602 INFO [RS:0;6a989be77d39:32967 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T03:56:37,603 INFO [RS:0;6a989be77d39:32967 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T03:56:37,603 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,614 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T03:56:37,617 INFO [RS:0;6a989be77d39:32967 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T03:56:37,617 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,617 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,617 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6a989be77d39:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:56:37,618 DEBUG [RS:0;6a989be77d39:32967 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:56:37,619 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,619 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,619 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,619 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,619 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,620 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,32967,1731556596816-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:56:37,643 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T03:56:37,643 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,32967,1731556596816-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,644 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:37,644 INFO [RS:0;6a989be77d39:32967 {}] regionserver.Replication(171): 6a989be77d39,32967,1731556596816 started 2024-11-14T03:56:37,666 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T03:56:37,666 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(1482): Serving as 6a989be77d39,32967,1731556596816, RpcServer on 6a989be77d39/172.17.0.2:32967, sessionid=0x100ef63d0b20001 2024-11-14T03:56:37,666 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T03:56:37,666 DEBUG [RS:0;6a989be77d39:32967 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6a989be77d39,32967,1731556596816 2024-11-14T03:56:37,666 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,32967,1731556596816' 2024-11-14T03:56:37,666 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T03:56:37,667 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T03:56:37,668 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T03:56:37,668 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T03:56:37,668 DEBUG [RS:0;6a989be77d39:32967 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6a989be77d39,32967,1731556596816 2024-11-14T03:56:37,668 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,32967,1731556596816' 2024-11-14T03:56:37,668 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T03:56:37,668 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T03:56:37,669 DEBUG [RS:0;6a989be77d39:32967 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T03:56:37,669 INFO [RS:0;6a989be77d39:32967 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T03:56:37,669 INFO [RS:0;6a989be77d39:32967 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T03:56:37,699 WARN [6a989be77d39:39519 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-14T03:56:37,773 INFO [RS:0;6a989be77d39:32967 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C32967%2C1731556596816, suffix=, logDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816, archiveDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/oldWALs, maxLogs=32 2024-11-14T03:56:37,775 INFO [RS:0;6a989be77d39:32967 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C32967%2C1731556596816.1731556597774 2024-11-14T03:56:37,785 INFO [RS:0;6a989be77d39:32967 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 2024-11-14T03:56:37,787 DEBUG [RS:0;6a989be77d39:32967 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42347:42347),(127.0.0.1/127.0.0.1:34577:34577)] 2024-11-14T03:56:37,950 DEBUG [6a989be77d39:39519 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T03:56:37,950 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6a989be77d39,32967,1731556596816 2024-11-14T03:56:37,952 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,32967,1731556596816, state=OPENING 2024-11-14T03:56:37,954 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T03:56:37,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:37,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:56:37,956 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:56:37,956 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:56:37,956 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:56:37,957 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,32967,1731556596816}] 2024-11-14T03:56:38,111 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T03:56:38,113 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49029, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T03:56:38,119 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T03:56:38,119 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:56:38,121 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C32967%2C1731556596816.meta, suffix=.meta, logDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816, archiveDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/oldWALs, maxLogs=32 2024-11-14T03:56:38,122 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta 2024-11-14T03:56:38,130 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta 2024-11-14T03:56:38,134 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34577:34577),(127.0.0.1/127.0.0.1:42347:42347)] 2024-11-14T03:56:38,134 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:56:38,135 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T03:56:38,135 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T03:56:38,135 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T03:56:38,135 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T03:56:38,135 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:56:38,136 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T03:56:38,136 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T03:56:38,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:56:38,139 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:56:38,139 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:38,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:56:38,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:56:38,141 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:56:38,141 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:38,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:56:38,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:56:38,143 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:56:38,143 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:38,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:56:38,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:56:38,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:56:38,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:38,145 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T03:56:38,145 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:56:38,146 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740 2024-11-14T03:56:38,147 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740 2024-11-14T03:56:38,149 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:56:38,149 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:56:38,150 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:56:38,151 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:56:38,152 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692523, jitterRate=-0.11941252648830414}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:56:38,153 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T03:56:38,153 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731556598136Writing region info on filesystem at 1731556598136Initializing all the Stores at 1731556598137 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556598137Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556598138 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556598138Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556598138Cleaning up temporary data from old regions at 1731556598149 (+11 ms)Running coprocessor post-open hooks at 1731556598153 (+4 ms)Region opened successfully at 1731556598153 2024-11-14T03:56:38,154 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731556598110 2024-11-14T03:56:38,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:56:38,157 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T03:56:38,157 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T03:56:38,158 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,32967,1731556596816 2024-11-14T03:56:38,159 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,32967,1731556596816, state=OPEN 2024-11-14T03:56:38,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:56:38,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:56:38,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:56:38,164 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:56:38,164 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6a989be77d39,32967,1731556596816 2024-11-14T03:56:38,164 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:56:38,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T03:56:38,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,32967,1731556596816 in 208 msec 2024-11-14T03:56:38,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T03:56:38,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 624 msec 2024-11-14T03:56:38,173 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:56:38,173 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T03:56:38,175 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:56:38,175 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,32967,1731556596816, seqNum=-1] 2024-11-14T03:56:38,175 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:56:38,177 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35695, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:56:38,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 739 msec 2024-11-14T03:56:38,183 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731556598183, completionTime=-1 2024-11-14T03:56:38,183 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T03:56:38,183 DEBUG 
[master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T03:56:38,185 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T03:56:38,186 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731556658185 2024-11-14T03:56:38,186 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731556718186 2024-11-14T03:56:38,186 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T03:56:38,186 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,39519,1731556596762-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:38,186 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,39519,1731556596762-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:38,186 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,39519,1731556596762-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:38,186 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6a989be77d39:39519, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:38,186 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:38,187 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:38,188 DEBUG [master/6a989be77d39:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T03:56:38,191 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.326sec 2024-11-14T03:56:38,191 INFO [master/6a989be77d39:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T03:56:38,191 INFO [master/6a989be77d39:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T03:56:38,191 INFO [master/6a989be77d39:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T03:56:38,191 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-14T03:56:38,191 INFO [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T03:56:38,191 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,39519,1731556596762-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:56:38,191 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,39519,1731556596762-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T03:56:38,195 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T03:56:38,195 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T03:56:38,195 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,39519,1731556596762-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:56:38,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@716d71e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:56:38,245 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6a989be77d39,39519,-1 for getting cluster id 2024-11-14T03:56:38,246 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T03:56:38,248 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a8bf9cd1-1a09-4ce7-8de8-c8c650a1e91c' 2024-11-14T03:56:38,249 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T03:56:38,249 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a8bf9cd1-1a09-4ce7-8de8-c8c650a1e91c" 2024-11-14T03:56:38,249 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e78e3a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:56:38,249 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6a989be77d39,39519,-1] 2024-11-14T03:56:38,249 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T03:56:38,250 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:56:38,251 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35798, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T03:56:38,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d10ba6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:56:38,253 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:56:38,253 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,32967,1731556596816, seqNum=-1] 2024-11-14T03:56:38,254 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:56:38,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38254, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:56:38,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6a989be77d39,39519,1731556596762 2024-11-14T03:56:38,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:56:38,260 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T03:56:38,260 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-14T03:56:38,260 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-14T03:56:38,261 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T03:56:38,262 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 6a989be77d39,39519,1731556596762 2024-11-14T03:56:38,262 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@552407f3 2024-11-14T03:56:38,262 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T03:56:38,264 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35800, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T03:56:38,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39519 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T03:56:38,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39519 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
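The two TableDescriptorChecker warnings above fire because this test shrinks "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192) far below their sanity thresholds so that flushes and log rolls happen quickly. A hedged sketch of how a test client might set those keys and create the single-family table whose creation appears in the next entries; the configuration keys and table name are taken from the log, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallRegionTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Deliberately tiny limits -- the same values TableDescriptorChecker warns about above.
        conf.setLong("hbase.hregion.max.filesize", 786432);
        conf.setLong("hbase.hregion.memstore.flush.size", 8192);

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submitting the create is what drives a CreateTableProcedure like pid=4 below.
          admin.createTable(td);
        }
      }
    }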
2024-11-14T03:56:38,265 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39519 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:56:38,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39519 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T03:56:38,268 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T03:56:38,268 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:38,268 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39519 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-14T03:56:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39519 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T03:56:38,270 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T03:56:38,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36469 is added to blk_1073741835_1011 (size=395) 2024-11-14T03:56:38,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33835 is added to blk_1073741835_1011 (size=395) 2024-11-14T03:56:38,282 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fae8a96709dbf13c257406dcd3c2f42a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9 2024-11-14T03:56:38,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33835 is added to blk_1073741836_1012 (size=78) 2024-11-14T03:56:38,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36469 is added to blk_1073741836_1012 (size=78) 2024-11-14T03:56:38,304 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:56:38,304 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing fae8a96709dbf13c257406dcd3c2f42a, disabling compactions & flushes 2024-11-14T03:56:38,304 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:56:38,304 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:56:38,304 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. after waiting 0 ms 2024-11-14T03:56:38,304 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:56:38,304 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:56:38,304 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for fae8a96709dbf13c257406dcd3c2f42a: Waiting for close lock at 1731556598304Disabling compacts and flushes for region at 1731556598304Disabling writes for close at 1731556598304Writing region close event to WAL at 1731556598304Closed at 1731556598304 2024-11-14T03:56:38,306 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T03:56:38,306 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731556598306"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731556598306"}]},"ts":"1731556598306"} 2024-11-14T03:56:38,309 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
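The MetaTableAccessor Put above writes the new region's info:regioninfo and info:state cells into hbase:meta, keyed by the full region name. A minimal client-side sketch of reading that row back and decoding the RegionInfo, assuming the standard catalog family/qualifier constants and an already-open Connection; the row key string is copied from the log entry:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaRowSketch {
      // Reads the catalog row written by the Put above (a sketch, not the test's own code).
      public static RegionInfo readRegionInfoFromMeta(Connection conn) throws Exception {
        byte[] row = Bytes.toBytes(
            "TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a.");
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Result r = meta.get(new Get(row)
              .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
          byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
          // The serialized region info is the 77-byte value the log reports as vlen=77.
          return value == null ? null : RegionInfo.parseFrom(value);
        }
      }
    }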
2024-11-14T03:56:38,310 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T03:56:38,311 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556598310"}]},"ts":"1731556598310"} 2024-11-14T03:56:38,313 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-14T03:56:38,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fae8a96709dbf13c257406dcd3c2f42a, ASSIGN}] 2024-11-14T03:56:38,315 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fae8a96709dbf13c257406dcd3c2f42a, ASSIGN 2024-11-14T03:56:38,316 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fae8a96709dbf13c257406dcd3c2f42a, ASSIGN; state=OFFLINE, location=6a989be77d39,32967,1731556596816; forceNewPlan=false, retain=false 2024-11-14T03:56:38,467 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fae8a96709dbf13c257406dcd3c2f42a, regionState=OPENING, regionLocation=6a989be77d39,32967,1731556596816 2024-11-14T03:56:38,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fae8a96709dbf13c257406dcd3c2f42a, ASSIGN because future has completed 2024-11-14T03:56:38,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fae8a96709dbf13c257406dcd3c2f42a, server=6a989be77d39,32967,1731556596816}] 2024-11-14T03:56:38,629 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 
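The assignment entries above move the new region through OPENING on 6a989be77d39,32967 via a TransitRegionStateProcedure and its child OpenRegionProcedure. From the client side, the resulting placement is what a RegionLocator reports once the region is OPEN; a short hedged sketch, assuming an already-open Connection (the probe row key is arbitrary for this single-region table):

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class LocateRegionSketch {
      // Prints which server hosts the row (illustrative; not part of the test itself).
      public static void printLocation(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"));
          // For this single-region table the encoded name should match the log's
          // fae8a96709dbf13c257406dcd3c2f42a and the server its regionLocation.
          System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
        }
      }
    }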
2024-11-14T03:56:38,629 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => fae8a96709dbf13c257406dcd3c2f42a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:56:38,630 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,630 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:56:38,630 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,630 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,631 INFO [StoreOpener-fae8a96709dbf13c257406dcd3c2f42a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,633 INFO [StoreOpener-fae8a96709dbf13c257406dcd3c2f42a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fae8a96709dbf13c257406dcd3c2f42a columnFamilyName info 2024-11-14T03:56:38,633 DEBUG [StoreOpener-fae8a96709dbf13c257406dcd3c2f42a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:56:38,633 INFO [StoreOpener-fae8a96709dbf13c257406dcd3c2f42a-1 {}] regionserver.HStore(327): Store=fae8a96709dbf13c257406dcd3c2f42a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:56:38,633 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,634 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/default/TestLogRolling-testLogRollOnPipelineRestart/fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,634 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/default/TestLogRolling-testLogRollOnPipelineRestart/fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,635 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,635 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,637 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,640 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/default/TestLogRolling-testLogRollOnPipelineRestart/fae8a96709dbf13c257406dcd3c2f42a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:56:38,640 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened fae8a96709dbf13c257406dcd3c2f42a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750103, jitterRate=-0.04619549214839935}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T03:56:38,641 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:56:38,641 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for fae8a96709dbf13c257406dcd3c2f42a: Running coprocessor pre-open hook at 1731556598630Writing region info on filesystem at 1731556598630Initializing all the Stores at 1731556598631 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556598631Cleaning up temporary data from old regions at 1731556598635 (+4 ms)Running coprocessor post-open hooks at 1731556598641 (+6 ms)Region opened successfully at 1731556598641 2024-11-14T03:56:38,643 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a., pid=6, masterSystemTime=1731556598624 2024-11-14T03:56:38,646 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:56:38,646 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:56:38,647 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fae8a96709dbf13c257406dcd3c2f42a, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,32967,1731556596816 2024-11-14T03:56:38,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fae8a96709dbf13c257406dcd3c2f42a, server=6a989be77d39,32967,1731556596816 because future has completed 2024-11-14T03:56:38,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T03:56:38,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure fae8a96709dbf13c257406dcd3c2f42a, server=6a989be77d39,32967,1731556596816 in 181 msec 2024-11-14T03:56:38,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T03:56:38,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fae8a96709dbf13c257406dcd3c2f42a, ASSIGN in 341 msec 2024-11-14T03:56:38,665 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T03:56:38,665 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556598665"}]},"ts":"1731556598665"} 2024-11-14T03:56:38,668 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-14T03:56:38,669 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T03:56:38,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 404 msec 2024-11-14T03:56:39,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:39,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:40,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:40,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:40,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T03:56:40,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T03:56:40,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T03:56:40,875 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-14T03:56:40,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:56:40,875 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T03:56:41,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:41,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:42,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:42,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:43,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:43,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:43,638 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:56:43,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:56:43,676 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T03:56:43,677 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-14T03:56:44,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:44,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:45,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:45,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:46,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:46,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:47,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:47,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:48,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:48,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-14T03:56:48,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39519 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-14T03:56:48,321 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-14T03:56:48,322 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-14T03:56:48,326 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-14T03:56:48,326 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a.
2024-11-14T03:56:48,330 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a., hostname=6a989be77d39,32967,1731556596816, seqNum=2]
2024-11-14T03:56:49,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:49,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:50,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:50,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-14T03:56:50,333 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774
2024-11-14T03:56:50,334 WARN [ResponseProcessor for block BP-75461122-172.17.0.2-1731556595898:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-75461122-172.17.0.2-1731556595898:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T03:56:50,335 WARN [DataStreamer for file /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 block BP-75461122-172.17.0.2-1731556595898:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-75461122-172.17.0.2-1731556595898:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33835,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK], DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33835,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]) is bad.
2024-11-14T03:56:50,336 WARN [ResponseProcessor for block BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1006
java.io.IOException: Bad response ERROR for BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:33835,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T03:56:50,336 WARN [DataStreamer for file /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556597333 block BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK], DatanodeInfoWithStorage[127.0.0.1:33835,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33835,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]) is bad.
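The repeated "Failed invocation ... Filesystem closed" warnings above come from HBase's WAL close path: RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed reflectively (hence the InvocationTargetException wrapper in every trace) and retries roughly once per second, but the FileSystem instance backing those hdfs://localhost:43619 paths has already been closed, so every probe fails with "Filesystem closed". A minimal sketch of that retry pattern, using the public HDFS API directly instead of reflection; recoverLease and isFileClosed are real DistributedFileSystem methods, while the helper class itself is illustrative and not HBase's actual implementation:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      /** Ask the NameNode to recover the lease on a WAL file, then poll until the file is closed. */
      public static boolean recoverLease(FileSystem fs, Path wal, long timeoutMs)
          throws InterruptedException {
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // nothing to recover on non-HDFS filesystems
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          try {
            // recoverLease returns true once the file is closed; isFileClosed is the cheap re-check
            if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
              return true;
            }
          } catch (IOException e) {
            // e.g. "Filesystem closed" when the client was already shut down,
            // which is exactly what the warnings in this log show
            System.err.println("Failed invocation for " + wal + ": " + e);
          }
          Thread.sleep(1000L); // the utility in the log retries about once per second
        }
        return false;
      }
    }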
2024-11-14T03:56:50,337 WARN [PacketResponder: BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33835] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:50,336 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1001149747_22 at /127.0.0.1:48830 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36469:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48830 dst: /127.0.0.1:36469 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
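Once the pipeline for the active WAL block is broken like this, the test (testLogRollOnPipelineRestart, judging by the table name and the wal.TestLogRolling lines above) presumably needs the region server to roll to a new WAL file so subsequent writes go down a fresh block pipeline. A hedged sketch of forcing such a roll through the public HBase client API; Admin.getRegionServers and Admin.rollWALWriter are real Admin methods, but the wrapper below is illustrative and not the test's actual code:

    import java.io.IOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class WalRollSketch {
      /** Ask every region server to roll its WAL, creating new writers and new HDFS block pipelines. */
      public static void rollAllWals(Connection connection) throws IOException {
        try (Admin admin = connection.getAdmin()) {
          for (ServerName server : admin.getRegionServers()) {
            admin.rollWALWriter(server); // the roll itself completes asynchronously on the server
          }
        }
      }
    }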
2024-11-14T03:56:50,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1575121715_22 at /127.0.0.1:48802 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36469:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48802 dst: /127.0.0.1:36469 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:50,336 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1001149747_22 at /127.0.0.1:60152 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33835:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60152 dst: /127.0.0.1:33835 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:50,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1575121715_22 at /127.0.0.1:60130 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33835:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60130 dst: /127.0.0.1:33835 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
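The DataStreamer "Error Recovery ... is bad." messages show the HDFS client dropping the failed datanode (127.0.0.1:33835) from each open write pipeline. Whether the client then tries to add a replacement datanode is controlled by client-side settings; the two keys below are real HDFS configuration properties, while the values and the helper are only an illustrative sketch for a two-datanode mini cluster like this one, where no replacement is available and the pipeline simply continues on the surviving datanode:

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineRecoveryConfSketch {
      /** Illustrative client-side knobs for datanode replacement on write-pipeline failure. */
      public static Configuration pipelineRecoveryConf() {
        Configuration conf = new Configuration();
        // Allow the client to request a replacement datanode when one in the pipeline fails.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT applies the standard replacement heuristics; with only two datanodes
        // there is nothing to swap in, consistent with what this log shows.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        return conf;
      }
    }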
2024-11-14T03:56:50,341 WARN [ResponseProcessor for block BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33835,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:50,342 WARN [DataStreamer for file /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta block BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK], DatanodeInfoWithStorage[127.0.0.1:33835,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33835,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]) is bad. 2024-11-14T03:56:50,342 WARN [PacketResponder: BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33835] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:50,342 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1001149747_22 at /127.0.0.1:48846 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36469:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48846 dst: /127.0.0.1:36469 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:50,342 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1001149747_22 at /127.0.0.1:60168 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33835:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60168 dst: /127.0.0.1:33835 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-14T03:56:50,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15e7f484{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-14T03:56:50,352 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@586e8021{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-14T03:56:50,352 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-14T03:56:50,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e63449e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-14T03:56:50,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232fa1ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,STOPPED}
2024-11-14T03:56:50,358 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-14T03:56:50,358 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-14T03:56:50,358 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-75461122-172.17.0.2-1731556595898 (Datanode Uuid 713432d8-9621-4318-9e01-0877af36c355) service to localhost/127.0.0.1:42105
2024-11-14T03:56:50,358 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-14T03:56:50,359 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data3/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-14T03:56:50,359 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data4/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-14T03:56:50,359 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-14T03:56:50,377 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-14T03:56:50,401 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-14T03:56:50,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-14T03:56:50,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-14T03:56:50,406 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-14T03:56:50,408 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7aa94e72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,AVAILABLE}
2024-11-14T03:56:50,409 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ac8bbe4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-14T03:56:50,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@386ed495{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/java.io.tmpdir/jetty-localhost-36587-hadoop-hdfs-3_4_1-tests_jar-_-any-7476850507616787675/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-14T03:56:50,605 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68e23717{HTTP/1.1, (http/1.1)}{localhost:36587}
2024-11-14T03:56:50,605 INFO [Time-limited test {}] server.Server(415): Started @166617ms
2024-11-14T03:56:50,607 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-14T03:56:50,670 WARN [ResponseProcessor for block BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1013
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T03:56:50,670 WARN [ResponseProcessor for block BP-75461122-172.17.0.2-1731556595898:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-75461122-172.17.0.2-1731556595898:blk_1073741833_1014
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:50,670 WARN [ResponseProcessor for block BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:50,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1001149747_22 at /127.0.0.1:37968 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36469:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37968 dst: /127.0.0.1:36469 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
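These ClosedChannelException and "Unexpected EOF" errors are the datanode and client sides of the same event: the test is bouncing the mini cluster's datanodes (the Jetty "Stopped ... Started" records before and after this point), which kills the open WRITE_BLOCK streams for the WAL and master-data blocks. A hedged sketch of how a test typically bounces one datanode with Hadoop's MiniDFSCluster harness; stopDataNode, restartDataNode, and waitActive are real MiniDFSCluster methods, while the wrapper below is illustrative rather than the test's actual code:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public final class DataNodeRestartSketch {
      /** Stop the first datanode, then bring it back on the same port and wait for the cluster. */
      public static void bounceDataNode(MiniDFSCluster cluster) throws IOException {
        MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0);
        // At this point writes to open WAL blocks hit pipeline errors like those logged above.
        cluster.restartDataNode(dnProps, true); // true = keep the same port
        cluster.waitActive();
      }
    }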
2024-11-14T03:56:50,673 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1001149747_22 at /127.0.0.1:37966 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36469:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37966 dst: /127.0.0.1:36469 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:50,676 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1575121715_22 at /127.0.0.1:37962 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36469:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37962 dst: /127.0.0.1:36469 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:50,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58bf7935{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:50,692 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74ea1d44{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:50,692 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:50,692 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@dd504c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:50,692 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fd8c23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:50,694 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:56:50,694 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-75461122-172.17.0.2-1731556595898 (Datanode Uuid a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4) service to localhost/127.0.0.1:42105 2024-11-14T03:56:50,694 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
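The records just above (Jetty datanode contexts stopped, IncrementalBlockReportManager interrupted, BPServiceActor ending its block pool service) are the datanode-side view of a deliberate datanode bounce; the test code that drives it is not part of this log. As a rough, hedged illustration only, such a bounce is typically driven through Hadoop's MiniDFSCluster test harness, roughly as sketched below — the cluster size and index are placeholders, and the exact harness calls used by this test are an assumption, not taken from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

// Rough sketch (assumed harness usage, not this test's code) of bouncing a datanode.
// The WARN/INFO records above are what the stopped datanode logs during such a bounce.
public final class DataNodeBounceSketch {
  public static void bounce(Configuration conf) throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // placeholder; this run also appears to use two datanodes
        .build();
    try {
      cluster.waitActive();
      // Stop datanode 0; its HTTP endpoints and BPServiceActor threads shut down,
      // producing records like the ones above.
      DataNodeProperties dn = cluster.stopDataNode(0);
      // ... exercise the cluster while the write pipeline is degraded ...
      cluster.restartDataNode(dn);  // bring the same datanode back
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}
```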
2024-11-14T03:56:50,694 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:56:50,695 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data1/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:50,695 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data2/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:50,695 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:56:50,726 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:50,733 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:50,736 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:50,736 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:50,736 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:56:50,738 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3903d405{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:50,738 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a2ed0f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:50,770 WARN [Thread-1330 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:56:50,776 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d45b8de9f76f60a with lease ID 0xfd5c1d0ce6298f29: from storage DS-ecfb4e98-8651-46f9-8733-4cd51229e903 node DatanodeRegistration(127.0.0.1:41415, datanodeUuid=713432d8-9621-4318-9e01-0877af36c355, infoPort=43315, infoSecurePort=0, ipcPort=46413, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:50,776 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d45b8de9f76f60a with lease ID 0xfd5c1d0ce6298f29: from storage DS-1f88b0bb-0869-477f-b3fd-b15c04318277 node DatanodeRegistration(127.0.0.1:41415, datanodeUuid=713432d8-9621-4318-9e01-0877af36c355, infoPort=43315, infoSecurePort=0, ipcPort=46413, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:50,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@480f741f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/java.io.tmpdir/jetty-localhost-34503-hadoop-hdfs-3_4_1-tests_jar-_-any-18007720070176909215/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:50,890 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31267d1e{HTTP/1.1, (http/1.1)}{localhost:34503} 2024-11-14T03:56:50,890 INFO [Time-limited test {}] server.Server(415): Started @166902ms 2024-11-14T03:56:50,892 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:56:51,092 WARN [Thread-1361 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:56:51,096 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd612122136416f84 with lease ID 0xfd5c1d0ce6298f2a: from storage DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd node DatanodeRegistration(127.0.0.1:41257, datanodeUuid=a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4, infoPort=45225, infoSecurePort=0, ipcPort=46309, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T03:56:51,096 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd612122136416f84 with lease ID 0xfd5c1d0ce6298f2a: from storage DS-5041434b-5e0d-4397-b572-78266d889856 node DatanodeRegistration(127.0.0.1:41257, datanodeUuid=a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4, infoPort=45225, infoSecurePort=0, ipcPort=46309, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:51,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:56:51,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:52,011 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-14T03:56:52,014 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-14T03:56:52,016 ERROR [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9-prefix:6a989be77d39,32967,1731556596816 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
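The "All datanodes [...] are bad. Aborting..." trace above is the DFS output stream giving up on its write pipeline once the last remaining datanode in that pipeline has failed. Whether the client instead tries to substitute a fresh datanode is governed by the standard HDFS client replace-datanode-on-failure settings; the sketch below only illustrates those keys with placeholder values and is not configuration read from this test run.

```java
import org.apache.hadoop.conf.Configuration;

/** Illustration only; placeholder values, not configuration taken from this run. */
public final class PipelinePolicySketch {
  public static Configuration pipelineFailureConf() {
    Configuration conf = new Configuration();
    // Whether the client may substitute a new datanode into an open write pipeline.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT / ALWAYS / NEVER: on a small two-datanode mini cluster there is usually
    // no spare node to add, so the stream can end up aborting as seen above.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // best-effort=true keeps writing with whatever datanodes remain instead of failing.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", false);
    return conf;
  }
}
```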
2024-11-14T03:56:52,016 WARN [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9-prefix:6a989be77d39,32967,1731556596816 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:52,016 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C32967%2C1731556596816:(num 1731556597774) roll requested 2024-11-14T03:56:52,017 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C32967%2C1731556596816.1731556612017 2024-11-14T03:56:52,035 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 newFile=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 2024-11-14T03:56:52,035 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:52,035 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:52,035 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:52,035 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:52,036 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:52,036 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 2024-11-14T03:56:52,036 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
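In the records above, the region server's log roller reacts to the failed append by rolling to a new WAL file ("roll requested" ... "Rolled WAL ... with entries=2, filesize=1.59 KB"), tolerating the non-fatal trailer-write failure on the old writer. For reference, the same roll can also be requested explicitly through the HBase Admin API; the sketch below is a generic illustration with a made-up server name, not code from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Generic sketch of requesting a WAL roll; the server name below is made up. */
public final class WalRollSketch {
  public static void rollWal() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask one region server to close its current WAL writer and open a new file,
      // the operation the log roller performs automatically in the records above.
      admin.rollWALWriter(ServerName.valueOf("example-host,16020,1731556596816"));
    }
  }
}
```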
2024-11-14T03:56:52,036 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:52,036 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 2024-11-14T03:56:52,037 WARN [IPC Server handler 2 on default port 42105 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-14T03:56:52,037 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 after 1ms 2024-11-14T03:56:52,039 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45225:45225),(127.0.0.1/127.0.0.1:43315:43315)] 2024-11-14T03:56:52,039 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 is not closed yet, will try archiving it next time 2024-11-14T03:56:52,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:52,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:56:53,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:53,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:54,046 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-14T03:56:54,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:56:54,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:54,777 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T03:56:55,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:55,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:56,045 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 after 4009ms 2024-11-14T03:56:56,052 WARN [ResponseProcessor for block BP-75461122-172.17.0.2-1731556595898:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-75461122-172.17.0.2-1731556595898:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:56,052 WARN [DataStreamer for file /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 block BP-75461122-172.17.0.2-1731556595898:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-75461122-172.17.0.2-1731556595898:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41257,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK], DatanodeInfoWithStorage[127.0.0.1:41415,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41257,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]) is bad. 
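The lease on the old WAL file is released only after repeated attempts ("Failed to recover lease, attempt=0 ... after 1ms" earlier, "Recovered lease, attempt=1 ... after 4009ms" above), because the NameNode answers "Lease recovery is in progress" until the last block is finalized. The loop below is a simplified, hand-written sketch of that wait pattern using the public DistributedFileSystem calls that appear in these stack traces; it is not the RecoverLeaseFSUtils implementation itself, and the timeout and back-off values are placeholders.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Simplified sketch of a lease-recovery wait loop (not the HBase implementation). */
public final class LeaseRecoverySketch {
  public static boolean waitForLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the NameNode has closed the file; otherwise
      // recovery is still in progress (cf. "Lease recovery is in progress" above).
      if (dfs.recoverLease(wal)) {
        return true;
      }
      // isFileClosed() is the same probe RecoverLeaseFSUtils invokes reflectively
      // in the stack traces above.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      attempt++;
      Thread.sleep(Math.min(1000L * attempt, 4000L)); // back off between attempts
    }
    return false;
  }
}
```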
2024-11-14T03:56:56,053 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1001149747_22 at /127.0.0.1:46384 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41257:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46384 dst: /127.0.0.1:41257 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:56,053 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1001149747_22 at /127.0.0.1:34492 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34492 dst: /127.0.0.1:41415 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:56,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@480f741f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:56,062 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31267d1e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:56,062 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:56,062 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a2ed0f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:56,062 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3903d405{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:56,074 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:56:56,074 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-75461122-172.17.0.2-1731556595898 (Datanode Uuid a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4) service to localhost/127.0.0.1:42105 2024-11-14T03:56:56,076 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data2/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:56,076 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:56:56,076 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:56:56,077 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:56:56,149 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data1/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:56,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:56,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:56:56,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:56:56,180 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:56,182 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:56,183 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:56,183 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:56:56,183 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dcfabbb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:56,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@443f9e40{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:56,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c2b3ac7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/java.io.tmpdir/jetty-localhost-40607-hadoop-hdfs-3_4_1-tests_jar-_-any-1703230486514771426/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:56,355 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20432799{HTTP/1.1, (http/1.1)}{localhost:40607} 2024-11-14T03:56:56,355 INFO [Time-limited test {}] server.Server(415): Started @172367ms 2024-11-14T03:56:56,357 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:56:56,432 WARN [ResponseProcessor for block BP-75461122-172.17.0.2-1731556595898:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-75461122-172.17.0.2-1731556595898:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:56,434 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1001149747_22 at /127.0.0.1:34506 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41415:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34506 dst: /127.0.0.1:41415 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:56:56,445 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@386ed495{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:56,445 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68e23717{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:56:56,446 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:56:56,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ac8bbe4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:56:56,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7aa94e72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,STOPPED} 2024-11-14T03:56:56,448 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:56:56,448 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:56:56,448 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-75461122-172.17.0.2-1731556595898 (Datanode Uuid 713432d8-9621-4318-9e01-0877af36c355) service to localhost/127.0.0.1:42105 2024-11-14T03:56:56,448 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:56:56,450 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data3/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:56,451 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data4/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:56:56,454 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:56:56,476 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:56:56,479 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:56:56,480 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:56:56,480 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:56:56,480 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:56:56,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21404da7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:56:56,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39f8899d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:56:56,512 WARN [Thread-1404 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:56:56,516 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1933a2f6b1268ee9 with lease ID 0xfd5c1d0ce6298f2b: from storage DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd node DatanodeRegistration(127.0.0.1:38983, datanodeUuid=a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4, infoPort=41693, infoSecurePort=0, ipcPort=37811, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:56,516 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1933a2f6b1268ee9 with lease ID 0xfd5c1d0ce6298f2b: from storage DS-5041434b-5e0d-4397-b572-78266d889856 node DatanodeRegistration(127.0.0.1:38983, datanodeUuid=a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4, infoPort=41693, infoSecurePort=0, ipcPort=37811, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:56,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@661bf07e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/java.io.tmpdir/jetty-localhost-37947-hadoop-hdfs-3_4_1-tests_jar-_-any-14190521266063409294/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:56:56,616 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1decdda3{HTTP/1.1, (http/1.1)}{localhost:37947} 2024-11-14T03:56:56,616 INFO [Time-limited test {}] server.Server(415): Started @172628ms 2024-11-14T03:56:56,618 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:56:56,716 WARN [Thread-1435 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:56:56,719 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2bfa8359747d4d59 with lease ID 0xfd5c1d0ce6298f2c: from storage DS-ecfb4e98-8651-46f9-8733-4cd51229e903 node DatanodeRegistration(127.0.0.1:45401, datanodeUuid=713432d8-9621-4318-9e01-0877af36c355, infoPort=36581, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T03:56:56,719 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2bfa8359747d4d59 with lease ID 0xfd5c1d0ce6298f2c: from storage DS-1f88b0bb-0869-477f-b3fd-b15c04318277 node DatanodeRegistration(127.0.0.1:45401, datanodeUuid=713432d8-9621-4318-9e01-0877af36c355, infoPort=36581, infoSecurePort=0, ipcPort=45781, storageInfo=lv=-57;cid=testClusterID;nsid=1931369422;c=1731556595898), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:56:57,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:56:57,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:57,643 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-14T03:56:57,646 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-14T03:56:57,647 ERROR [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9-prefix:6a989be77d39,32967,1731556596816 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41415,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:57,648 WARN [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9-prefix:6a989be77d39,32967,1731556596816 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41415,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:57,648 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C32967%2C1731556596816:(num 1731556612017) roll requested 2024-11-14T03:56:57,648 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C32967%2C1731556596816.1731556617648 2024-11-14T03:56:57,655 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 newFile=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556617648 2024-11-14T03:56:57,655 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:57,655 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:57,655 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:57,655 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:57,655 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:57,656 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556617648 2024-11-14T03:56:57,656 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41415,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:56:57,656 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41415,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:56:57,656 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 2024-11-14T03:56:57,656 WARN [IPC Server handler 1 on default port 42105 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-14T03:56:57,657 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36581:36581),(127.0.0.1/127.0.0.1:41693:41693)] 2024-11-14T03:56:57,657 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 after 1ms 2024-11-14T03:56:57,657 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 is not closed yet, will try archiving it next time 2024-11-14T03:56:58,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:58,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:56:59,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:59,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:56:59,658 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C32967%2C1731556596816.1731556619658 2024-11-14T03:56:59,666 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556617648 newFile=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 2024-11-14T03:56:59,666 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:59,666 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:59,667 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:59,667 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:59,667 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:56:59,667 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556617648 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 2024-11-14T03:56:59,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741838_1019 (size=1264) 2024-11-14T03:56:59,669 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41693:41693),(127.0.0.1/127.0.0.1:36581:36581)] 2024-11-14T03:56:59,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741838_1019 (size=1264) 2024-11-14T03:56:59,670 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 is not closed yet, will try archiving it next time 2024-11-14T03:56:59,670 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556617648 is not closed yet, will try archiving it next time 2024-11-14T03:56:59,670 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 2024-11-14T03:56:59,670 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 2024-11-14T03:56:59,670 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 is not closed yet, will try archiving it next time 2024-11-14T03:56:59,670 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 after 0ms 2024-11-14T03:56:59,671 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 2024-11-14T03:56:59,682 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731556598642/Put/vlen=218/seqid=0] 2024-11-14T03:56:59,683 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731556608331/Put/vlen=1045/seqid=0] 2024-11-14T03:56:59,683 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556597774 2024-11-14T03:56:59,683 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 2024-11-14T03:56:59,683 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 2024-11-14T03:56:59,683 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 after 0ms 2024-11-14T03:56:59,683 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 2024-11-14T03:56:59,687 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731556612016/Put/vlen=1045/seqid=0] 2024-11-14T03:56:59,687 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731556614047/Put/vlen=1045/seqid=0] 2024-11-14T03:56:59,687 DEBUG 
[Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 2024-11-14T03:56:59,687 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556617648 2024-11-14T03:56:59,687 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556617648 2024-11-14T03:56:59,688 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556617648 after 1ms 2024-11-14T03:56:59,688 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556617648 2024-11-14T03:56:59,692 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731556617647/Put/vlen=1045/seqid=0] 2024-11-14T03:56:59,692 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 2024-11-14T03:56:59,692 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 2024-11-14T03:56:59,692 WARN [IPC Server handler 0 on default port 42105 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-14T03:56:59,692 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 after 0ms 2024-11-14T03:57:00,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:00,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:00,517 WARN [ResponseProcessor for block BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:57:00,517 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1575121715_22 at /127.0.0.1:44134 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:38983:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44134 dst: /127.0.0.1:38983 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38983 remote=/127.0.0.1:44134]. Total timeout mills is 60000, 59149 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:57:00,517 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1575121715_22 at /127.0.0.1:42098 [Receiving block BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45401:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42098 dst: /127.0.0.1:45401 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:57:00,517 WARN [DataStreamer for file /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 block BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38983,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK], DatanodeInfoWithStorage[127.0.0.1:45401,DS-ecfb4e98-8651-46f9-8733-4cd51229e903,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38983,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]) is bad. 
2024-11-14T03:57:00,521 WARN [DataStreamer for file /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 block BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:57:00,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741839_1022 (size=85) 2024-11-14T03:57:00,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741839_1022 (size=85) 2024-11-14T03:57:01,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:01,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:01,515 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-14T03:57:01,658 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556612017 after 4002ms 2024-11-14T03:57:02,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:02,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:03,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:03,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:03,693 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 after 4001ms 2024-11-14T03:57:03,693 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 2024-11-14T03:57:03,697 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 2024-11-14T03:57:03,697 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing fae8a96709dbf13c257406dcd3c2f42a 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-14T03:57:03,698 ERROR [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9-prefix:6a989be77d39,32967,1731556596816 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:57:03,698 WARN [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9-prefix:6a989be77d39,32967,1731556596816 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:57:03,699 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C32967%2C1731556596816:(num 1731556619658) roll requested 2024-11-14T03:57:03,699 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C32967%2C1731556596816.1731556623699 2024-11-14T03:57:03,705 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 newFile=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556623699 2024-11-14T03:57:03,705 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,705 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,705 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,706 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,706 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,706 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556623699 2024-11-14T03:57:03,706 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:57:03,706 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-75461122-172.17.0.2-1731556595898:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:57:03,706 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 2024-11-14T03:57:03,707 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36581:36581),(127.0.0.1/127.0.0.1:41693:41693)] 2024-11-14T03:57:03,707 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 is not closed yet, will try archiving it next time 2024-11-14T03:57:03,707 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 after 1ms 2024-11-14T03:57:03,708 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.1731556619658 to hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/oldWALs/6a989be77d39%2C32967%2C1731556596816.1731556619658 2024-11-14T03:57:03,730 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/default/TestLogRolling-testLogRollOnPipelineRestart/fae8a96709dbf13c257406dcd3c2f42a/.tmp/info/872113c24aeb4a8790681d4e3074969d is 1080, key is row1002/info:/1731556608331/Put/seqid=0 2024-11-14T03:57:03,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741841_1024 (size=9270) 2024-11-14T03:57:03,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741841_1024 (size=9270) 2024-11-14T03:57:03,736 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/default/TestLogRolling-testLogRollOnPipelineRestart/fae8a96709dbf13c257406dcd3c2f42a/.tmp/info/872113c24aeb4a8790681d4e3074969d 2024-11-14T03:57:03,744 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/default/TestLogRolling-testLogRollOnPipelineRestart/fae8a96709dbf13c257406dcd3c2f42a/.tmp/info/872113c24aeb4a8790681d4e3074969d as hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/default/TestLogRolling-testLogRollOnPipelineRestart/fae8a96709dbf13c257406dcd3c2f42a/info/872113c24aeb4a8790681d4e3074969d 2024-11-14T03:57:03,750 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/default/TestLogRolling-testLogRollOnPipelineRestart/fae8a96709dbf13c257406dcd3c2f42a/info/872113c24aeb4a8790681d4e3074969d, entries=4, sequenceid=8, filesize=9.1 K 2024-11-14T03:57:03,752 INFO [Time-limited test 
{}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for fae8a96709dbf13c257406dcd3c2f42a in 55ms, sequenceid=8, compaction requested=false 2024-11-14T03:57:03,752 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for fae8a96709dbf13c257406dcd3c2f42a: 2024-11-14T03:57:03,752 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-14T03:57:03,753 ERROR [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9-prefix:6a989be77d39,32967,1731556596816.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:57:03,753 WARN [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9-prefix:6a989be77d39,32967,1731556596816.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:57:03,753 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C32967%2C1731556596816.meta:.meta(num 1731556598121) roll requested 2024-11-14T03:57:03,753 INFO [regionserver/6a989be77d39:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C32967%2C1731556596816.meta.1731556623753.meta 2024-11-14T03:57:03,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,759 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:03,759 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556623753.meta 2024-11-14T03:57:03,761 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:57:03,761 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:57:03,761 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta 2024-11-14T03:57:03,762 WARN [IPC Server handler 0 on default port 42105 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1015 2024-11-14T03:57:03,762 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta after 1ms 2024-11-14T03:57:03,766 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36581:36581),(127.0.0.1/127.0.0.1:41693:41693)] 2024-11-14T03:57:03,766 DEBUG [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta is not closed yet, will try archiving it next time 2024-11-14T03:57:03,790 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/.tmp/info/65f8a4a654b54cb3b082bbd540fdd268 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a./info:regioninfo/1731556598647/Put/seqid=0 2024-11-14T03:57:03,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741843_1027 (size=7125) 2024-11-14T03:57:03,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741843_1027 (size=7125) 2024-11-14T03:57:03,797 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/.tmp/info/65f8a4a654b54cb3b082bbd540fdd268 2024-11-14T03:57:03,824 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/.tmp/ns/798be82fd5a24faa939f8d0db3ac3457 is 43, key is default/ns:d/1731556598178/Put/seqid=0 2024-11-14T03:57:03,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741844_1028 (size=5153) 2024-11-14T03:57:03,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741844_1028 (size=5153) 2024-11-14T03:57:03,830 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/.tmp/ns/798be82fd5a24faa939f8d0db3ac3457 2024-11-14T03:57:03,850 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/.tmp/table/644242c668bf4d0bb262afa8761df584 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731556598665/Put/seqid=0 2024-11-14T03:57:03,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741845_1029 (size=5438) 2024-11-14T03:57:03,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741845_1029 (size=5438) 2024-11-14T03:57:03,855 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/.tmp/table/644242c668bf4d0bb262afa8761df584 2024-11-14T03:57:03,863 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/.tmp/info/65f8a4a654b54cb3b082bbd540fdd268 as hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/info/65f8a4a654b54cb3b082bbd540fdd268 2024-11-14T03:57:03,868 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/info/65f8a4a654b54cb3b082bbd540fdd268, entries=10, sequenceid=11, filesize=7.0 K 2024-11-14T03:57:03,869 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/.tmp/ns/798be82fd5a24faa939f8d0db3ac3457 as hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/ns/798be82fd5a24faa939f8d0db3ac3457 2024-11-14T03:57:03,875 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/ns/798be82fd5a24faa939f8d0db3ac3457, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T03:57:03,875 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/.tmp/table/644242c668bf4d0bb262afa8761df584 as hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/table/644242c668bf4d0bb262afa8761df584 2024-11-14T03:57:03,881 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/table/644242c668bf4d0bb262afa8761df584, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T03:57:03,882 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false 2024-11-14T03:57:03,882 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T03:57:03,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T03:57:03,888 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T03:57:03,888 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:57:03,889 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:57:03,889 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:57:03,889 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T03:57:03,889 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T03:57:03,889 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=563553000, stopped=false 2024-11-14T03:57:03,889 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6a989be77d39,39519,1731556596762 2024-11-14T03:57:03,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:57:03,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:57:03,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:03,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:03,891 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:57:03,892 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T03:57:03,892 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:57:03,892 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:57:03,892 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:57:03,892 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:57:03,892 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6a989be77d39,32967,1731556596816' ***** 2024-11-14T03:57:03,892 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T03:57:03,893 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T03:57:03,893 INFO [RS:0;6a989be77d39:32967 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T03:57:03,893 INFO [RS:0;6a989be77d39:32967 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T03:57:03,893 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(3091): Received CLOSE for fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:57:03,893 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T03:57:03,893 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(959): stopping server 6a989be77d39,32967,1731556596816 2024-11-14T03:57:03,893 INFO [RS:0;6a989be77d39:32967 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:57:03,893 INFO [RS:0;6a989be77d39:32967 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6a989be77d39:32967. 2024-11-14T03:57:03,893 DEBUG [RS:0;6a989be77d39:32967 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:57:03,893 DEBUG [RS:0;6a989be77d39:32967 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:57:03,893 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fae8a96709dbf13c257406dcd3c2f42a, disabling compactions & flushes 2024-11-14T03:57:03,894 INFO [RS:0;6a989be77d39:32967 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T03:57:03,894 INFO [RS:0;6a989be77d39:32967 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T03:57:03,894 INFO [RS:0;6a989be77d39:32967 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T03:57:03,894 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:57:03,894 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T03:57:03,894 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:57:03,894 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. after waiting 0 ms 2024-11-14T03:57:03,894 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:57:03,894 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T03:57:03,894 DEBUG [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(1325): Online Regions={fae8a96709dbf13c257406dcd3c2f42a=TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T03:57:03,894 DEBUG [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, fae8a96709dbf13c257406dcd3c2f42a 2024-11-14T03:57:03,894 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:57:03,894 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:57:03,894 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:57:03,894 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:57:03,894 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:57:03,900 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/default/TestLogRolling-testLogRollOnPipelineRestart/fae8a96709dbf13c257406dcd3c2f42a/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-14T03:57:03,901 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:57:03,901 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fae8a96709dbf13c257406dcd3c2f42a: Waiting for close lock at 1731556623893Running coprocessor pre-close hooks at 1731556623893Disabling compacts and flushes for region at 1731556623893Disabling writes for close at 1731556623894 (+1 ms)Writing region close event to WAL at 1731556623895 (+1 ms)Running coprocessor post-close hooks at 1731556623901 (+6 ms)Closed at 1731556623901 2024-11-14T03:57:03,901 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731556598264.fae8a96709dbf13c257406dcd3c2f42a. 2024-11-14T03:57:03,908 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T03:57:03,909 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:57:03,909 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:57:03,909 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556623894Running coprocessor pre-close hooks at 1731556623894Disabling compacts and flushes for region at 1731556623894Disabling writes for close at 1731556623894Writing region close event to WAL at 1731556623898 (+4 ms)Running coprocessor post-close hooks at 1731556623909 (+11 ms)Closed at 1731556623909 2024-11-14T03:57:03,909 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T03:57:04,094 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(976): stopping server 6a989be77d39,32967,1731556596816; all regions closed. 
2024-11-14T03:57:04,097 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:04,100 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:04,100 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:04,101 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:04,101 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:04,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741842_1025 (size=825) 2024-11-14T03:57:04,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741842_1025 (size=825) 2024-11-14T03:57:04,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:04,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:04,620 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T03:57:04,620 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T03:57:05,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:05,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:05,621 INFO [regionserver/6a989be77d39:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:57:06,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:06,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:06,731 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T03:57:06,741 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-14T03:57:07,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:07,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:07,763 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta after 4002ms 2024-11-14T03:57:07,764 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/WALs/6a989be77d39,32967,1731556596816/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta to hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/oldWALs/6a989be77d39%2C32967%2C1731556596816.meta.1731556598121.meta 2024-11-14T03:57:07,766 DEBUG [RS:0;6a989be77d39:32967 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/oldWALs 2024-11-14T03:57:07,766 INFO [RS:0;6a989be77d39:32967 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C32967%2C1731556596816.meta:.meta(num 1731556623753) 2024-11-14T03:57:07,767 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,767 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,767 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,767 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,767 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741840_1023 (size=1162) 2024-11-14T03:57:07,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741840_1023 (size=1162) 2024-11-14T03:57:07,773 DEBUG [RS:0;6a989be77d39:32967 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/oldWALs 2024-11-14T03:57:07,773 INFO [RS:0;6a989be77d39:32967 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C32967%2C1731556596816:(num 1731556623699) 2024-11-14T03:57:07,773 DEBUG [RS:0;6a989be77d39:32967 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:57:07,773 INFO [RS:0;6a989be77d39:32967 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:57:07,773 INFO [RS:0;6a989be77d39:32967 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:57:07,774 INFO [RS:0;6a989be77d39:32967 {}] hbase.ChoreService(370): Chore service for: regionserver/6a989be77d39:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T03:57:07,774 INFO [RS:0;6a989be77d39:32967 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:57:07,774 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T03:57:07,774 INFO [RS:0;6a989be77d39:32967 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32967 2024-11-14T03:57:07,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:57:07,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6a989be77d39,32967,1731556596816 2024-11-14T03:57:07,778 INFO [RS:0;6a989be77d39:32967 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:57:07,780 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6a989be77d39,32967,1731556596816] 2024-11-14T03:57:07,781 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6a989be77d39,32967,1731556596816 already deleted, retry=false 2024-11-14T03:57:07,781 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6a989be77d39,32967,1731556596816 expired; onlineServers=0 2024-11-14T03:57:07,781 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6a989be77d39,39519,1731556596762' ***** 2024-11-14T03:57:07,781 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T03:57:07,781 INFO [M:0;6a989be77d39:39519 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:57:07,781 INFO [M:0;6a989be77d39:39519 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:57:07,781 DEBUG [M:0;6a989be77d39:39519 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T03:57:07,781 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T03:57:07,781 DEBUG [M:0;6a989be77d39:39519 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T03:57:07,782 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556597481 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556597481,5,FailOnTimeoutGroup] 2024-11-14T03:57:07,782 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556597485 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556597485,5,FailOnTimeoutGroup] 2024-11-14T03:57:07,782 INFO [M:0;6a989be77d39:39519 {}] hbase.ChoreService(370): Chore service for: master/6a989be77d39:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T03:57:07,782 INFO [M:0;6a989be77d39:39519 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:57:07,782 DEBUG [M:0;6a989be77d39:39519 {}] master.HMaster(1795): Stopping service threads 2024-11-14T03:57:07,782 INFO [M:0;6a989be77d39:39519 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T03:57:07,782 INFO [M:0;6a989be77d39:39519 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:57:07,782 INFO [M:0;6a989be77d39:39519 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T03:57:07,782 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T03:57:07,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T03:57:07,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:07,783 DEBUG [M:0;6a989be77d39:39519 {}] zookeeper.ZKUtil(347): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T03:57:07,783 WARN [M:0;6a989be77d39:39519 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T03:57:07,784 INFO [M:0;6a989be77d39:39519 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/.lastflushedseqids 2024-11-14T03:57:07,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741846_1030 (size=130) 2024-11-14T03:57:07,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741846_1030 (size=130) 2024-11-14T03:57:07,789 INFO [M:0;6a989be77d39:39519 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T03:57:07,789 INFO [M:0;6a989be77d39:39519 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T03:57:07,790 DEBUG [M:0;6a989be77d39:39519 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:57:07,790 INFO [M:0;6a989be77d39:39519 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:57:07,790 DEBUG [M:0;6a989be77d39:39519 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:57:07,790 DEBUG [M:0;6a989be77d39:39519 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:57:07,790 DEBUG [M:0;6a989be77d39:39519 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:57:07,790 INFO [M:0;6a989be77d39:39519 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-14T03:57:07,790 ERROR [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData-prefix:6a989be77d39,39519,1731556596762 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:57:07,790 WARN [FSHLog-0-hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData-prefix:6a989be77d39,39519,1731556596762 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:57:07,790 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 6a989be77d39%2C39519%2C1731556596762:(num 1731556597333) roll requested 2024-11-14T03:57:07,791 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C39519%2C1731556596762.1731556627791 2024-11-14T03:57:07,795 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,795 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,795 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,795 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,795 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,795 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556597333 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556627791 2024-11-14T03:57:07,796 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T03:57:07,796 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36469,DS-2cc998dc-1f4f-4600-a72a-19a1047db2bd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T03:57:07,796 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556597333 2024-11-14T03:57:07,796 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36581:36581),(127.0.0.1/127.0.0.1:41693:41693)] 2024-11-14T03:57:07,796 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556597333 is not closed yet, will try archiving it next time 2024-11-14T03:57:07,796 WARN [IPC Server handler 2 on default port 42105 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556597333 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-14T03:57:07,797 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556597333 after 0ms 2024-11-14T03:57:07,812 DEBUG [M:0;6a989be77d39:39519 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5cef24b6559f4c6c81603c7a39c35ea7 is 82, key is hbase:meta,,1/info:regioninfo/1731556598158/Put/seqid=0 2024-11-14T03:57:07,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741848_1033 (size=5672) 2024-11-14T03:57:07,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741848_1033 (size=5672) 2024-11-14T03:57:07,817 INFO [M:0;6a989be77d39:39519 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5cef24b6559f4c6c81603c7a39c35ea7 2024-11-14T03:57:07,839 DEBUG [M:0;6a989be77d39:39519 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b8da5d3dda214523b5ef2ef94f45b03e is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731556598671/Put/seqid=0 2024-11-14T03:57:07,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741849_1034 (size=6119) 2024-11-14T03:57:07,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741849_1034 (size=6119) 2024-11-14T03:57:07,844 INFO [M:0;6a989be77d39:39519 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b8da5d3dda214523b5ef2ef94f45b03e 2024-11-14T03:57:07,869 DEBUG [M:0;6a989be77d39:39519 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b82134fa1344f3eb7430954a32ca62f is 69, key is 6a989be77d39,32967,1731556596816/rs:state/1731556597588/Put/seqid=0 2024-11-14T03:57:07,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741850_1035 (size=5156) 2024-11-14T03:57:07,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741850_1035 (size=5156) 2024-11-14T03:57:07,875 INFO [M:0;6a989be77d39:39519 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b82134fa1344f3eb7430954a32ca62f 2024-11-14T03:57:07,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:57:07,880 INFO [RS:0;6a989be77d39:32967 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:57:07,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32967-0x100ef63d0b20001, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:57:07,880 INFO [RS:0;6a989be77d39:32967 {}] regionserver.HRegionServer(1031): Exiting; stopping=6a989be77d39,32967,1731556596816; zookeeper connection closed. 
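The ZKWatcher lines above show the region server's ZooKeeper session being torn down during shutdown: the watcher receives type=None events with state=Closed and no path as the connection goes away. Below is a small hedged sketch of a watcher that reacts to those connection-state events using the stock ZooKeeper client API; the quorum string 127.0.0.1:50409 is taken from the log, while the class name, session timeout and latch are assumptions for illustration and do not mirror HBase's ZKWatcher.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch only: log connection-state events similar to the ZKWatcher(609) records above.
    public class SessionWatcherSketch implements Watcher {
      private final CountDownLatch closed = new CountDownLatch(1);

      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.None) {
          // Connection-state change: SyncConnected, Disconnected, Expired or Closed.
          System.out.println("Received ZooKeeper Event, type=None, state="
              + event.getState() + ", path=" + event.getPath());
          if (event.getState() == Event.KeeperState.Closed) {
            closed.countDown();
          }
        }
      }

      public static void main(String[] args) throws Exception {
        SessionWatcherSketch watcher = new SessionWatcherSketch();
        // 127.0.0.1:50409 is the mini ZooKeeper quorum used by this test run.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:50409", 30_000, watcher);
        zk.close();                // triggers the state=Closed event seen in the log
        watcher.closed.await();
      }
    }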
2024-11-14T03:57:07,880 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@640b2d96 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@640b2d96 2024-11-14T03:57:07,881 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T03:57:07,894 DEBUG [M:0;6a989be77d39:39519 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0732cd5d4f254e9a9be0a0d5ba696221 is 52, key is load_balancer_on/state:d/1731556598259/Put/seqid=0 2024-11-14T03:57:07,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741851_1036 (size=5056) 2024-11-14T03:57:07,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741851_1036 (size=5056) 2024-11-14T03:57:07,899 INFO [M:0;6a989be77d39:39519 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0732cd5d4f254e9a9be0a0d5ba696221 2024-11-14T03:57:07,904 DEBUG [M:0;6a989be77d39:39519 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5cef24b6559f4c6c81603c7a39c35ea7 as hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5cef24b6559f4c6c81603c7a39c35ea7 2024-11-14T03:57:07,909 INFO [M:0;6a989be77d39:39519 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5cef24b6559f4c6c81603c7a39c35ea7, entries=8, sequenceid=56, filesize=5.5 K 2024-11-14T03:57:07,910 DEBUG [M:0;6a989be77d39:39519 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b8da5d3dda214523b5ef2ef94f45b03e as hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b8da5d3dda214523b5ef2ef94f45b03e 2024-11-14T03:57:07,914 INFO [M:0;6a989be77d39:39519 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b8da5d3dda214523b5ef2ef94f45b03e, entries=6, sequenceid=56, filesize=6.0 K 2024-11-14T03:57:07,915 DEBUG [M:0;6a989be77d39:39519 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b82134fa1344f3eb7430954a32ca62f as hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b82134fa1344f3eb7430954a32ca62f 
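The flush records above write each store's snapshot to a temporary HFile under .tmp/ and then commit it by moving it into the matching family directory (info/, proc/, rs/, state/), after which HStore reports the file as Added. The sketch below shows that write-under-.tmp-then-rename pattern with the plain Hadoop FileSystem API; the directory layout and file name are borrowed from the log, but the helper itself and the local paths are illustrative assumptions, not the HRegionFileSystem implementation.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch only: commit a flushed file by renaming it out of .tmp, as in the records above.
    public class CommitTmpFileSketch {
      static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dst = new Path(familyDir, tmpFile.getName());
        // On HDFS the rename is a metadata-only operation, so the new store file
        // becomes visible in the family directory in one step.
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
      }

      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);   // local FS here; the test uses hdfs://localhost:42105
        Path region = new Path("/tmp/commit-sketch/1595e783b53d99cd5eef43b6debb2682");
        Path tmp = new Path(region, ".tmp/info/5cef24b6559f4c6c81603c7a39c35ea7");
        Path infoDir = new Path(region, "info");
        fs.mkdirs(tmp.getParent());
        fs.mkdirs(infoDir);
        fs.create(tmp, true).close();           // stand-in for the flushed HFile
        System.out.println("Committed to " + commit(fs, tmp, infoDir));
      }
    }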
2024-11-14T03:57:07,920 INFO [M:0;6a989be77d39:39519 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b82134fa1344f3eb7430954a32ca62f, entries=1, sequenceid=56, filesize=5.0 K 2024-11-14T03:57:07,921 DEBUG [M:0;6a989be77d39:39519 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0732cd5d4f254e9a9be0a0d5ba696221 as hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0732cd5d4f254e9a9be0a0d5ba696221 2024-11-14T03:57:07,927 INFO [M:0;6a989be77d39:39519 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0732cd5d4f254e9a9be0a0d5ba696221, entries=1, sequenceid=56, filesize=4.9 K 2024-11-14T03:57:07,928 INFO [M:0;6a989be77d39:39519 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=56, compaction requested=false 2024-11-14T03:57:07,929 INFO [M:0;6a989be77d39:39519 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:57:07,929 DEBUG [M:0;6a989be77d39:39519 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556627790Disabling compacts and flushes for region at 1731556627790Disabling writes for close at 1731556627790Obtaining lock to block concurrent updates at 1731556627790Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731556627790Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731556627790Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731556627797 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731556627797Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731556627811 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731556627811Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731556627824 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731556627838 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731556627838Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731556627850 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731556627868 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731556627868Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731556627880 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731556627893 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731556627893Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10b2a864: reopening flushed file at 1731556627904 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67f582f0: reopening flushed file at 1731556627909 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23312f71: reopening flushed file at 1731556627915 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cb8bc34: reopening flushed file at 1731556627920 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=56, compaction requested=false at 1731556627928 (+8 ms)Writing region close event to WAL at 1731556627929 (+1 ms)Closed at 1731556627929 2024-11-14T03:57:07,930 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,930 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,930 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,930 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,930 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:07,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38983 is added to blk_1073741847_1031 (size=757) 2024-11-14T03:57:07,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45401 is added to blk_1073741847_1031 (size=757) 2024-11-14T03:57:08,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:08,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:08,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:08,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:09,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:09,431 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:57:09,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,457 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,469 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:09,731 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T03:57:10,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:10,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:10,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:57:10,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T03:57:10,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T03:57:10,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T03:57:11,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:11,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:11,797 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556597333 after 4001ms 2024-11-14T03:57:11,798 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/WALs/6a989be77d39,39519,1731556596762/6a989be77d39%2C39519%2C1731556596762.1731556597333 to hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/oldWALs/6a989be77d39%2C39519%2C1731556596762.1731556597333 2024-11-14T03:57:11,801 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/MasterData/oldWALs/6a989be77d39%2C39519%2C1731556596762.1731556597333 to hdfs://localhost:42105/user/jenkins/test-data/20daef89-13bb-3bb7-f695-d17bbc5558c9/oldWALs/6a989be77d39%2C39519%2C1731556596762.1731556597333$masterlocalwal$ 2024-11-14T03:57:11,802 INFO [M:0;6a989be77d39:39519 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T03:57:11,802 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T03:57:11,802 INFO [M:0;6a989be77d39:39519 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39519 2024-11-14T03:57:11,802 INFO [M:0;6a989be77d39:39519 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:57:11,904 INFO [M:0;6a989be77d39:39519 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:57:11,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:57:11,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39519-0x100ef63d0b20000, quorum=127.0.0.1:50409, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:57:11,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@661bf07e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:57:11,911 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1decdda3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:57:11,912 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:57:11,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39f8899d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:57:11,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21404da7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,STOPPED} 2024-11-14T03:57:11,913 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:57:11,913 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:57:11,913 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:57:11,913 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-75461122-172.17.0.2-1731556595898 (Datanode Uuid 713432d8-9621-4318-9e01-0877af36c355) service to localhost/127.0.0.1:42105 2024-11-14T03:57:11,914 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data3/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:57:11,914 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data4/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:57:11,915 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:57:11,917 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c2b3ac7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:57:11,918 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20432799{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:57:11,918 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:57:11,918 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@443f9e40{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:57:11,918 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dcfabbb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,STOPPED} 2024-11-14T03:57:11,920 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:57:11,920 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:57:11,920 WARN [BP-75461122-172.17.0.2-1731556595898 heartbeating to localhost/127.0.0.1:42105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-75461122-172.17.0.2-1731556595898 (Datanode Uuid a917d2b8-e024-4ec2-ac42-8cb3e09fd5d4) service to localhost/127.0.0.1:42105 2024-11-14T03:57:11,920 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:57:11,920 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data1/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:57:11,920 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/cluster_154bfd19-4a97-05fa-ff32-e432c0365ddd/data/data2/current/BP-75461122-172.17.0.2-1731556595898 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:57:11,921 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:57:11,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1698d36f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:57:11,928 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13fdd007{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:57:11,928 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:57:11,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14ea830a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:57:11,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24710539{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir/,STOPPED} 2024-11-14T03:57:11,936 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T03:57:11,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T03:57:11,966 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 156) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42105 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:42105 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:42105 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:42105 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42105 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:42105 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42105 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42105 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=436 (was 405) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 12), AvailableMemoryMB=5463 (was 5768) 2024-11-14T03:57:11,975 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=436, ProcessCount=11, AvailableMemoryMB=5463 2024-11-14T03:57:11,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T03:57:11,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.log.dir so I do NOT create it in target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa 2024-11-14T03:57:11,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ca9c21c1-cef7-eb81-1ca2-61ad6d8bab2d/hadoop.tmp.dir so I do NOT create it in target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa 2024-11-14T03:57:11,976 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9, deleteOnExit=true 2024-11-14T03:57:11,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T03:57:11,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/test.cache.data in system properties and HBase conf 2024-11-14T03:57:11,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.log.dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T03:57:11,977 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:57:11,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T03:57:11,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/nfs.dump.dir in system properties and HBase conf 2024-11-14T03:57:11,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/java.io.tmpdir in system properties and HBase conf 2024-11-14T03:57:11,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:57:11,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T03:57:11,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T03:57:11,992 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:57:12,078 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:57:12,085 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:57:12,088 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:57:12,088 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:57:12,088 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:57:12,089 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:57:12,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56673fe9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:57:12,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b7f365e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:57:12,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:12,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:12,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22e47138{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/java.io.tmpdir/jetty-localhost-33111-hadoop-hdfs-3_4_1-tests_jar-_-any-11899295142030863259/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:57:12,223 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@232381c8{HTTP/1.1, (http/1.1)}{localhost:33111} 2024-11-14T03:57:12,223 INFO [Time-limited test {}] server.Server(415): Started @188235ms 2024-11-14T03:57:12,235 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:57:12,288 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:57:12,291 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:57:12,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:57:12,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:57:12,292 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:57:12,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@521506bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:57:12,293 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a3c93e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:57:12,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78617008{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/java.io.tmpdir/jetty-localhost-43267-hadoop-hdfs-3_4_1-tests_jar-_-any-18346363770569494777/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:57:12,410 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fa18b90{HTTP/1.1, (http/1.1)}{localhost:43267} 2024-11-14T03:57:12,410 INFO [Time-limited test {}] server.Server(415): Started @188422ms 2024-11-14T03:57:12,411 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:57:12,452 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:57:12,455 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:57:12,457 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:57:12,457 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:57:12,457 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:57:12,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7101d128{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:57:12,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ecaedd6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:57:12,510 WARN [Thread-1630 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/data/data2/current/BP-1861591780-172.17.0.2-1731556632011/current, will proceed with Du for space computation calculation, 2024-11-14T03:57:12,510 WARN [Thread-1629 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/data/data1/current/BP-1861591780-172.17.0.2-1731556632011/current, will proceed with Du for space computation calculation, 2024-11-14T03:57:12,539 WARN [Thread-1608 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:57:12,541 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11147c457453113e with lease ID 0x4986b2f6be0f7515: Processing first storage report for DS-64cf232c-03d4-4938-8404-216d7940205d from datanode DatanodeRegistration(127.0.0.1:33565, datanodeUuid=1ea4d249-0825-402b-a1a1-9299b27eb1e9, infoPort=35525, infoSecurePort=0, ipcPort=35057, storageInfo=lv=-57;cid=testClusterID;nsid=1484467462;c=1731556632011) 2024-11-14T03:57:12,542 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11147c457453113e with lease ID 0x4986b2f6be0f7515: from storage DS-64cf232c-03d4-4938-8404-216d7940205d node DatanodeRegistration(127.0.0.1:33565, datanodeUuid=1ea4d249-0825-402b-a1a1-9299b27eb1e9, infoPort=35525, infoSecurePort=0, ipcPort=35057, storageInfo=lv=-57;cid=testClusterID;nsid=1484467462;c=1731556632011), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:57:12,542 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11147c457453113e with lease ID 0x4986b2f6be0f7515: Processing first storage report for DS-a3811fc1-f356-4510-9050-aa47ec6df494 from datanode DatanodeRegistration(127.0.0.1:33565, datanodeUuid=1ea4d249-0825-402b-a1a1-9299b27eb1e9, infoPort=35525, infoSecurePort=0, ipcPort=35057, storageInfo=lv=-57;cid=testClusterID;nsid=1484467462;c=1731556632011) 2024-11-14T03:57:12,542 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11147c457453113e with lease ID 0x4986b2f6be0f7515: from storage DS-a3811fc1-f356-4510-9050-aa47ec6df494 node DatanodeRegistration(127.0.0.1:33565, datanodeUuid=1ea4d249-0825-402b-a1a1-9299b27eb1e9, infoPort=35525, infoSecurePort=0, ipcPort=35057, storageInfo=lv=-57;cid=testClusterID;nsid=1484467462;c=1731556632011), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:57:12,584 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6bd1231f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/java.io.tmpdir/jetty-localhost-36819-hadoop-hdfs-3_4_1-tests_jar-_-any-5294970033176717602/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:57:12,585 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fa194e5{HTTP/1.1, (http/1.1)}{localhost:36819} 2024-11-14T03:57:12,585 INFO [Time-limited test {}] server.Server(415): Started @188597ms 2024-11-14T03:57:12,586 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
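The startup sequence above, from "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ... numDataNodes=2, numZkServers=1}" through the two DataNode web endpoints and block reports, is what HBaseTestingUtil prints while bringing up an embedded cluster for a test. A minimal sketch of the kind of harness that produces this output is shown below; the option values mirror the log, but the class name and surrounding test logic are hypothetical, and the method names follow the current HBase master branch (older releases use HBaseTestingUtility instead).

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Hypothetical harness: starts a mini cluster with the same option values the
// log reports, runs test logic, then tears everything down again.
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster
        .numRegionServers(1)  // one region server
        .numDataNodes(2)      // two HDFS DataNodes, matching the two block reports above
        .numZkServers(1)      // one embedded ZooKeeper
        .build();
    util.startMiniCluster(option);
    try {
      // ... test logic against util.getConnection() would go here ...
    } finally {
      util.shutdownMiniCluster(); // shuts down HBase, then DFS and ZooKeeper
    }
  }
}

The ResourceChecker "before:" line and the earlier "Thread LEAK?"/"OpenFileDescriptor LEAK?" summary are the before/after resource comparison that wraps each such test.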
2024-11-14T03:57:12,677 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/data/data3/current/BP-1861591780-172.17.0.2-1731556632011/current, will proceed with Du for space computation calculation, 2024-11-14T03:57:12,677 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/data/data4/current/BP-1861591780-172.17.0.2-1731556632011/current, will proceed with Du for space computation calculation, 2024-11-14T03:57:12,700 WARN [Thread-1644 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T03:57:12,703 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7f1d981ec7f755d with lease ID 0x4986b2f6be0f7516: Processing first storage report for DS-b668e719-c740-4625-ad04-8b378237e342 from datanode DatanodeRegistration(127.0.0.1:39077, datanodeUuid=e4dc1f39-5d39-4004-afb5-8f56cc289eaa, infoPort=40181, infoSecurePort=0, ipcPort=35379, storageInfo=lv=-57;cid=testClusterID;nsid=1484467462;c=1731556632011) 2024-11-14T03:57:12,703 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7f1d981ec7f755d with lease ID 0x4986b2f6be0f7516: from storage DS-b668e719-c740-4625-ad04-8b378237e342 node DatanodeRegistration(127.0.0.1:39077, datanodeUuid=e4dc1f39-5d39-4004-afb5-8f56cc289eaa, infoPort=40181, infoSecurePort=0, ipcPort=35379, storageInfo=lv=-57;cid=testClusterID;nsid=1484467462;c=1731556632011), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:57:12,703 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7f1d981ec7f755d with lease ID 0x4986b2f6be0f7516: Processing first storage report for DS-99f4fa08-6bcd-490f-a59d-cb266b7c2938 from datanode DatanodeRegistration(127.0.0.1:39077, datanodeUuid=e4dc1f39-5d39-4004-afb5-8f56cc289eaa, infoPort=40181, infoSecurePort=0, ipcPort=35379, storageInfo=lv=-57;cid=testClusterID;nsid=1484467462;c=1731556632011) 2024-11-14T03:57:12,703 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7f1d981ec7f755d with lease ID 0x4986b2f6be0f7516: from storage DS-99f4fa08-6bcd-490f-a59d-cb266b7c2938 node DatanodeRegistration(127.0.0.1:39077, datanodeUuid=e4dc1f39-5d39-4004-afb5-8f56cc289eaa, infoPort=40181, infoSecurePort=0, ipcPort=35379, storageInfo=lv=-57;cid=testClusterID;nsid=1484467462;c=1731556632011), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:57:12,709 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa 2024-11-14T03:57:12,711 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/zookeeper_0, clientPort=54988, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T03:57:12,712 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54988 2024-11-14T03:57:12,712 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:57:12,714 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:57:12,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:57:12,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:57:12,723 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b with version=8 2024-11-14T03:57:12,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/hbase-staging 2024-11-14T03:57:12,726 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:57:12,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:57:12,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:57:12,726 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:57:12,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:57:12,726 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:57:12,726 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T03:57:12,727 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:57:12,727 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40453 2024-11-14T03:57:12,729 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40453 connecting to ZooKeeper ensemble=127.0.0.1:54988 2024-11-14T03:57:12,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404530x0, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:57:12,741 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40453-0x100ef645d310000 connected 2024-11-14T03:57:12,755 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:57:12,756 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:57:12,758 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:57:12,758 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b, hbase.cluster.distributed=false 2024-11-14T03:57:12,760 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:57:12,760 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40453 2024-11-14T03:57:12,760 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40453 2024-11-14T03:57:12,761 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40453 2024-11-14T03:57:12,761 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40453 2024-11-14T03:57:12,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40453 2024-11-14T03:57:12,777 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:57:12,777 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:57:12,777 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:57:12,777 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:57:12,778 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:57:12,778 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:57:12,778 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T03:57:12,778 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:57:12,778 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38181 2024-11-14T03:57:12,780 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38181 connecting to ZooKeeper ensemble=127.0.0.1:54988 2024-11-14T03:57:12,780 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:57:12,782 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:57:12,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:381810x0, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:57:12,786 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:57:12,786 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38181-0x100ef645d310001 connected 2024-11-14T03:57:12,786 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T03:57:12,787 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T03:57:12,788 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T03:57:12,789 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:57:12,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38181 2024-11-14T03:57:12,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38181 2024-11-14T03:57:12,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38181 2024-11-14T03:57:12,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38181 2024-11-14T03:57:12,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38181 
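The RpcExecutor lines above (handlerCount=3 FIFO executors, a priority queue with one write and two read handlers) and the "server-side Connection retries=45" line reflect Configuration values, most of them deliberately smaller than production defaults so the mini cluster stays lightweight. The sketch below shows how such overrides are typically applied before the cluster is started; the keys are standard HBase configuration properties, but whether this particular test sets exactly these keys and values is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: a test-side Configuration with reduced handler and retry
// counts. The resulting Configuration would be handed to the testing utility
// (e.g. new HBaseTestingUtil(conf)) before startMiniCluster(...).
public class TestConfSketch {
  public static Configuration testConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3); // comparable to the handlerCount=3 executors above
    conf.setInt("hbase.client.retries.number", 5);      // give up sooner than the default on failing RPCs
    return conf;
  }
}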
2024-11-14T03:57:12,802 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6a989be77d39:40453 2024-11-14T03:57:12,803 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6a989be77d39,40453,1731556632726 2024-11-14T03:57:12,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:57:12,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:57:12,806 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6a989be77d39,40453,1731556632726 2024-11-14T03:57:12,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:12,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T03:57:12,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:12,809 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T03:57:12,809 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6a989be77d39,40453,1731556632726 from backup master directory 2024-11-14T03:57:12,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6a989be77d39,40453,1731556632726 2024-11-14T03:57:12,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:57:12,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:57:12,811 WARN [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
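The ZKWatcher lines above trace active-master registration: the master creates a znode under /hbase/backup-masters, watches /hbase/master, promotes itself, and both processes receive NodeCreated, NodeDeleted and NodeChildrenChanged events as the znodes change. The generic ZooKeeper watch pattern behind those events looks roughly like the sketch below; it uses the plain ZooKeeper client API rather than HBase's ZKWatcher, and the path, quorum address and timeout are illustrative.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Register interest in a znode and react when it is created or deleted.
public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54988", 30_000, event ->
        // Session-level events such as SyncConnected arrive on this default watcher.
        System.out.println("session event: " + event.getState()));
    Watcher masterWatcher = (WatchedEvent event) ->
        System.out.println("znode event " + event.getType() + " on " + event.getPath());
    // exists() sets a one-shot watch even when /hbase/master does not exist yet,
    // mirroring "Set watcher on znode that does not yet exist" in the log.
    zk.exists("/hbase/master", masterWatcher);
    Thread.sleep(1_000); // give the watcher a moment to fire in a demo run
    zk.close();
  }
}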
2024-11-14T03:57:12,811 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6a989be77d39,40453,1731556632726 2024-11-14T03:57:12,816 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/hbase.id] with ID: 6fa0eaa8-569d-49f1-91b6-db4e25762533 2024-11-14T03:57:12,816 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/.tmp/hbase.id 2024-11-14T03:57:12,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:57:12,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:57:12,830 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/.tmp/hbase.id]:[hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/hbase.id] 2024-11-14T03:57:12,844 INFO [master/6a989be77d39:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:57:12,844 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T03:57:12,845 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
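The two FSUtils lines above show the cluster ID being published with a write-to-temporary-then-rename pattern: the ID is first written under .tmp/hbase.id and then moved to its final hbase.id location, so a reader never sees a partially written file. A generic HDFS sketch of that pattern follows; the class, paths and content are illustrative, not HBase's actual implementation.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Write to a temp path first, then rename: the final path only ever appears fully written.
public class PublishFileSketch {
  public static void publish(FileSystem fs, Path finalPath, String content) throws Exception {
    Path tmp = new Path(finalPath.getParent(), ".tmp/" + finalPath.getName());
    try (FSDataOutputStream out = fs.create(tmp, true)) { // overwrite any stale temp file
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, finalPath)) {                     // a single namespace operation, atomic on HDFS
      throw new java.io.IOException("rename failed: " + tmp + " -> " + finalPath);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // fs.defaultFS decides which filesystem is used
    try (FileSystem fs = FileSystem.get(conf)) {
      publish(fs, new Path("/tmp/demo/cluster.id"), "example-cluster-id");
    }
  }
}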
2024-11-14T03:57:12,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:12,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:12,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:57:12,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:57:12,862 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:57:12,863 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T03:57:12,863 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:57:12,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:57:12,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:57:12,878 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store 2024-11-14T03:57:12,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:57:12,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:57:13,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:13,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:13,304 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:57:13,304 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:57:13,304 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
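
The two WARN entries above come from RecoverLeaseFSUtils probing whether an old WAL file is already closed. The probe calls DistributedFileSystem.isFileClosed through reflection (so HBase can run against Hadoop versions that lack the method), and the InvocationTargetException merely wraps the real failure shown under "Caused by": the test's DFSClient had already been shut down, hence "Filesystem closed". A minimal sketch of that reflective-probe pattern with the cause unwrapped the same way; the helper class is hypothetical, not HBase's actual RecoverLeaseFSUtils code:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {

      /**
       * Ask the filesystem whether a file is already closed, via reflection,
       * returning false when the method does not exist on this Hadoop version.
       */
      static boolean isFileClosed(FileSystem fs, Path path) throws IOException {
        Method m;
        try {
          m = fs.getClass().getMethod("isFileClosed", Path.class);
        } catch (NoSuchMethodException e) {
          return false; // older Hadoop: caller falls back to recoverLease polling
        }
        try {
          return (Boolean) m.invoke(fs, path);
        } catch (IllegalAccessException e) {
          return false;
        } catch (InvocationTargetException e) {
          // The real failure (e.g. "java.io.IOException: Filesystem closed") is the cause,
          // which is why the log prints "InvocationTargetException: null" plus a Caused by.
          Throwable cause = e.getCause();
          if (cause instanceof IOException) {
            throw (IOException) cause;
          }
          throw new IOException(cause);
        }
      }

      private IsFileClosedProbe() {}
    }
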
2024-11-14T03:57:13,304 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:57:13,304 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:57:13,305 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:57:13,305 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:57:13,305 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556633304Disabling compacts and flushes for region at 1731556633304Disabling writes for close at 1731556633305 (+1 ms)Writing region close event to WAL at 1731556633305Closed at 1731556633305 2024-11-14T03:57:13,306 WARN [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/.initializing 2024-11-14T03:57:13,306 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/WALs/6a989be77d39,40453,1731556632726 2024-11-14T03:57:13,309 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C40453%2C1731556632726, suffix=, logDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/WALs/6a989be77d39,40453,1731556632726, archiveDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/oldWALs, maxLogs=10 2024-11-14T03:57:13,309 INFO [master/6a989be77d39:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C40453%2C1731556632726.1731556633309 2024-11-14T03:57:13,315 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/WALs/6a989be77d39,40453,1731556632726/6a989be77d39%2C40453%2C1731556632726.1731556633309 2024-11-14T03:57:13,318 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40181:40181),(127.0.0.1/127.0.0.1:35525:35525)] 2024-11-14T03:57:13,320 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:57:13,320 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:57:13,320 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,320 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] 
regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,322 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,323 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T03:57:13,323 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:13,324 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:57:13,324 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,325 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T03:57:13,325 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:13,325 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:57:13,325 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,326 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T03:57:13,326 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:13,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:57:13,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T03:57:13,328 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:13,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:57:13,329 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,329 DEBUG 
[master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,330 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,331 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,331 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,332 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T03:57:13,333 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:57:13,335 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:57:13,335 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799360, jitterRate=0.016439512372016907}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T03:57:13,336 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731556633320Initializing all the Stores at 1731556633321 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556633321Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556633321Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556633321Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556633322 (+1 ms)Cleaning up temporary data from old regions at 1731556633331 (+9 ms)Region opened successfully at 1731556633336 (+5 ms) 2024-11-14T03:57:13,336 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T03:57:13,339 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74e30cd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:57:13,340 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T03:57:13,341 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T03:57:13,341 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T03:57:13,341 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T03:57:13,341 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T03:57:13,341 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T03:57:13,342 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T03:57:13,344 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
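
The FlushLargeStoresPolicy output above shows where the 33554432-byte lower bound comes from when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table: the region's memstore flush size (134217728 bytes = 128 MB, per the MasterRegionFlusherAndCompactor line) divided by the number of column families in master:store (info, proc, rs, state), i.e. 32 MB per family. A small arithmetic sketch of that fallback using the values from this log; the class is illustrative only:

    public final class FlushLowerBoundFallback {

      /** Fallback used when the per-column-family lower bound is not configured on the table. */
      static long flushSizeLowerBound(long memStoreFlushSize, int columnFamilyCount) {
        return memStoreFlushSize / columnFamilyCount;
      }

      public static void main(String[] args) {
        long memStoreFlushSize = 134_217_728L; // 128 MB, from the MasterRegionFlusherAndCompactor line
        int families = 4;                      // info, proc, rs, state
        // Prints 33554432 (32 MB), matching FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
        System.out.println(flushSizeLowerBound(memStoreFlushSize, families));
      }

      private FlushLowerBoundFallback() {}
    }
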
2024-11-14T03:57:13,344 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T03:57:13,346 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T03:57:13,346 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T03:57:13,347 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T03:57:13,348 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T03:57:13,348 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T03:57:13,349 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T03:57:13,350 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T03:57:13,351 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T03:57:13,352 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T03:57:13,353 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T03:57:13,354 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T03:57:13,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:57:13,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:57:13,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:13,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-14T03:57:13,357 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6a989be77d39,40453,1731556632726, sessionid=0x100ef645d310000, setting cluster-up flag (Was=false) 2024-11-14T03:57:13,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:13,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:13,365 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T03:57:13,366 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,40453,1731556632726 2024-11-14T03:57:13,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:13,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:13,374 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T03:57:13,375 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,40453,1731556632726 2024-11-14T03:57:13,376 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T03:57:13,378 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T03:57:13,378 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T03:57:13,378 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
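
The ZKUtil(444) / RecoverableZooKeeper(212) pairs above are routine startup probes: the newly active master reads optional znodes such as /hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge and /hbase/snapshot-cleanup, and a missing node simply means "no persisted state, use defaults", which is why the log stresses it is not necessarily an error. A minimal sketch of such a probe against the plain ZooKeeper client (connection setup and watcher wiring omitted; this is not HBase's ZKUtil):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public final class OptionalZNode {

      /**
       * Read the data of an optional znode, returning null when the node does not exist.
       * A missing node is expected on a fresh cluster and is not treated as an error.
       */
      static byte[] getDataIfExists(ZooKeeper zk, String path)
          throws KeeperException, InterruptedException {
        try {
          return zk.getData(path, false, new Stat());
        } catch (KeeperException.NoNodeException e) {
          return null; // e.g. /hbase/balancer not created yet: fall back to the default setting
        }
      }

      private OptionalZNode() {}
    }
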
2024-11-14T03:57:13,379 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6a989be77d39,40453,1731556632726 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T03:57:13,380 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:57:13,380 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:57:13,380 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:57:13,380 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:57:13,380 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6a989be77d39:0, corePoolSize=10, maxPoolSize=10 2024-11-14T03:57:13,380 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,380 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:57:13,380 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,384 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731556663383 2024-11-14T03:57:13,384 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T03:57:13,384 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T03:57:13,384 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T03:57:13,384 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T03:57:13,384 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T03:57:13,384 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T03:57:13,384 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,384 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T03:57:13,385 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T03:57:13,385 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:57:13,385 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T03:57:13,385 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T03:57:13,385 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T03:57:13,385 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T03:57:13,385 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556633385,5,FailOnTimeoutGroup] 2024-11-14T03:57:13,385 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556633385,5,FailOnTimeoutGroup] 2024-11-14T03:57:13,386 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,386 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T03:57:13,386 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,386 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
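
Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above registers a periodic background task with the master's ChoreService: LogsCleaner and HFileCleaner every 600000 ms (10 minutes), ReplicationBarrierCleaner every 12 hours, SnapshotCleaner every 30 minutes. A sketch of the same scheduling pattern with a plain ScheduledExecutorService; cleanOldLogs() is a hypothetical stand-in for the configured cleaner delegates (TimeToLiveLogCleaner, ReplicationLogCleaner, ...):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public final class LogsCleanerChoreSketch {

      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
        long periodMs = 600_000L; // matches "name=LogsCleaner, period=600000, unit=MILLISECONDS"
        chorePool.scheduleAtFixedRate(LogsCleanerChoreSketch::cleanOldLogs,
            periodMs, periodMs, TimeUnit.MILLISECONDS);
      }

      /** Hypothetical stand-in for the cleaner-delegate chain run by the real chore. */
      private static void cleanOldLogs() {
        // delete WALs under oldWALs that every cleaner delegate agrees are no longer needed
      }

      private LogsCleanerChoreSketch() {}
    }
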
2024-11-14T03:57:13,386 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:13,386 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T03:57:13,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:57:13,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:57:13,393 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(746): ClusterId : 6fa0eaa8-569d-49f1-91b6-db4e25762533 2024-11-14T03:57:13,393 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T03:57:13,394 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T03:57:13,394 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b 2024-11-14T03:57:13,395 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T03:57:13,395 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T03:57:13,398 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T03:57:13,398 DEBUG [RS:0;6a989be77d39:38181 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f150496, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:57:13,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:57:13,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:57:13,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:57:13,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:57:13,413 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:57:13,413 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:13,414 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:57:13,414 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:57:13,415 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6a989be77d39:38181 2024-11-14T03:57:13,415 INFO [RS:0;6a989be77d39:38181 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T03:57:13,415 INFO [RS:0;6a989be77d39:38181 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T03:57:13,415 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T03:57:13,415 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:57:13,415 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:13,415 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,40453,1731556632726 with port=38181, startcode=1731556632777 2024-11-14T03:57:13,416 DEBUG [RS:0;6a989be77d39:38181 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T03:57:13,416 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:57:13,416 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:57:13,417 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:57:13,418 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:13,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:57:13,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:57:13,418 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52021, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T03:57:13,419 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40453 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6a989be77d39,38181,1731556632777 2024-11-14T03:57:13,419 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40453 {}] master.ServerManager(517): Registering regionserver=6a989be77d39,38181,1731556632777 2024-11-14T03:57:13,419 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:57:13,419 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:13,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:57:13,420 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:57:13,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740 2024-11-14T03:57:13,421 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b 
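
The table descriptors being created and opened above ('master:store' earlier, 'hbase:meta' here) are essentially serialized column-family settings, and the attribute names printed in the log map directly onto the HBase client descriptor builders. A sketch that reproduces the logged 'info' family attributes on a hypothetical table called demo; the table and class names are illustrative, while the builder calls are the standard HBase 2.x+ client API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class InfoFamilyDescriptorSketch {

      /** Build a descriptor carrying the same 'info' family attributes the log prints. */
      static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
            .setBlockCacheEnabled(true)                           // BLOCKCACHE => 'true'
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .build();
      }

      private InfoFamilyDescriptorSketch() {}
    }
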
2024-11-14T03:57:13,421 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42027 2024-11-14T03:57:13,421 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T03:57:13,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740 2024-11-14T03:57:13,422 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:57:13,422 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:57:13,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:57:13,423 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:57:13,423 DEBUG [RS:0;6a989be77d39:38181 {}] zookeeper.ZKUtil(111): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6a989be77d39,38181,1731556632777 2024-11-14T03:57:13,423 WARN [RS:0;6a989be77d39:38181 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T03:57:13,423 INFO [RS:0;6a989be77d39:38181 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:57:13,423 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777 2024-11-14T03:57:13,424 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:57:13,425 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6a989be77d39,38181,1731556632777] 2024-11-14T03:57:13,427 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:57:13,427 INFO [RS:0;6a989be77d39:38181 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T03:57:13,427 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816871, jitterRate=0.03870536386966705}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:57:13,428 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731556633410Initializing all the Stores at 1731556633411 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556633411Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556633411Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556633411Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556633411Cleaning up temporary data from old regions at 1731556633422 (+11 ms)Region opened successfully at 1731556633428 (+6 ms) 2024-11-14T03:57:13,428 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:57:13,428 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:57:13,428 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:57:13,428 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:57:13,428 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:57:13,429 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:57:13,429 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556633428Disabling compacts and flushes for region at 1731556633428Disabling writes for close at 1731556633428Writing region close event to WAL at 1731556633429 (+1 ms)Closed at 1731556633429 2024-11-14T03:57:13,430 INFO [RS:0;6a989be77d39:38181 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T03:57:13,430 INFO [RS:0;6a989be77d39:38181 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T03:57:13,430 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
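
The MemStoreFlusher line above ("globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false") reports derived values rather than explicit settings. Assuming the common defaults of a 0.4 heap fraction for the global memstore size and a 0.95 low-water factor (assumptions about this test's configuration, not facts read from the log), 880 M corresponds to roughly a 2.2 GB heap and 836 M is 95% of the limit. A small arithmetic sketch under those assumptions:

    public final class GlobalMemStoreLimitSketch {

      public static void main(String[] args) {
        long heapBytes = 2_200L * 1024 * 1024; // assumed heap size of this test JVM
        double globalFraction = 0.4;           // assumed default global memstore fraction of heap
        double lowerLimitFactor = 0.95;        // assumed default low-water fraction of the limit

        long globalLimit = (long) (heapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerLimitFactor);

        // ~880 MB and ~836 MB, matching "globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M"
        System.out.printf("limit=%d MB, lowMark=%d MB%n",
            globalLimit / (1024 * 1024), lowMark / (1024 * 1024));
      }

      private GlobalMemStoreLimitSketch() {}
    }
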
2024-11-14T03:57:13,430 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:57:13,430 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T03:57:13,430 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T03:57:13,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T03:57:13,432 INFO [RS:0;6a989be77d39:38181 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T03:57:13,432 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] 
executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:57:13,433 DEBUG [RS:0;6a989be77d39:38181 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:57:13,434 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T03:57:13,436 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,436 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,436 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,436 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,436 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,436 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,38181,1731556632777-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:57:13,459 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T03:57:13,459 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,38181,1731556632777-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,459 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:13,459 INFO [RS:0;6a989be77d39:38181 {}] regionserver.Replication(171): 6a989be77d39,38181,1731556632777 started 2024-11-14T03:57:13,477 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T03:57:13,477 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1482): Serving as 6a989be77d39,38181,1731556632777, RpcServer on 6a989be77d39/172.17.0.2:38181, sessionid=0x100ef645d310001 2024-11-14T03:57:13,477 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T03:57:13,477 DEBUG [RS:0;6a989be77d39:38181 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6a989be77d39,38181,1731556632777 2024-11-14T03:57:13,477 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,38181,1731556632777' 2024-11-14T03:57:13,477 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T03:57:13,478 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T03:57:13,478 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T03:57:13,478 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T03:57:13,478 DEBUG [RS:0;6a989be77d39:38181 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6a989be77d39,38181,1731556632777 2024-11-14T03:57:13,478 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,38181,1731556632777' 2024-11-14T03:57:13,478 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T03:57:13,479 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T03:57:13,479 DEBUG [RS:0;6a989be77d39:38181 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T03:57:13,479 INFO [RS:0;6a989be77d39:38181 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T03:57:13,479 INFO [RS:0;6a989be77d39:38181 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T03:57:13,581 INFO [RS:0;6a989be77d39:38181 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C38181%2C1731556632777, suffix=, logDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777, archiveDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/oldWALs, maxLogs=32 2024-11-14T03:57:13,582 INFO [RS:0;6a989be77d39:38181 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C38181%2C1731556632777.1731556633582 2024-11-14T03:57:13,585 WARN [6a989be77d39:40453 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
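[editor's note] The WAL settings reported above by AbstractFSWAL (blocksize=256 MB, rollsize=128 MB, maxLogs=32) map onto standard HBase configuration properties. The following is a minimal illustrative sketch of pinning those values in a client/test Configuration, assuming the usual property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs); it is not the configuration code actually used by this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSettingsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size: 256 MB, matching "blocksize=256 MB" in the log above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll size = blocksize * multiplier; 0.5 reproduces the logged 128 MB roll size.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on retained WAL files before flushes are forced, matching maxLogs=32.
    conf.setInt("hbase.regionserver.maxlogs", 32);

    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("Effective WAL roll size: " + rollSize + " bytes");
  }
}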
2024-11-14T03:57:13,591 INFO [RS:0;6a989be77d39:38181 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556633582 2024-11-14T03:57:13,600 DEBUG [RS:0;6a989be77d39:38181 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40181:40181),(127.0.0.1/127.0.0.1:35525:35525)] 2024-11-14T03:57:13,835 DEBUG [6a989be77d39:40453 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T03:57:13,836 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6a989be77d39,38181,1731556632777 2024-11-14T03:57:13,837 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,38181,1731556632777, state=OPENING 2024-11-14T03:57:13,838 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T03:57:13,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:13,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:57:13,841 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:57:13,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:57:13,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,38181,1731556632777}] 2024-11-14T03:57:13,841 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:57:13,994 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T03:57:13,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49263, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T03:57:14,001 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T03:57:14,001 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:57:14,003 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C38181%2C1731556632777.meta, suffix=.meta, 
logDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777, archiveDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/oldWALs, maxLogs=32 2024-11-14T03:57:14,004 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C38181%2C1731556632777.meta.1731556634003.meta 2024-11-14T03:57:14,009 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.meta.1731556634003.meta 2024-11-14T03:57:14,012 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35525:35525),(127.0.0.1/127.0.0.1:40181:40181)] 2024-11-14T03:57:14,012 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:57:14,013 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T03:57:14,013 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T03:57:14,013 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T03:57:14,013 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T03:57:14,013 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:57:14,013 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T03:57:14,013 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T03:57:14,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:57:14,016 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:57:14,016 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:14,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:57:14,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:57:14,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:57:14,017 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:14,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:57:14,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:57:14,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:57:14,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:14,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:57:14,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:57:14,019 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:57:14,019 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:14,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T03:57:14,020 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:57:14,021 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740 2024-11-14T03:57:14,022 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740 2024-11-14T03:57:14,023 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:57:14,023 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:57:14,024 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:57:14,025 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:57:14,026 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767701, jitterRate=-0.023818686604499817}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:57:14,026 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T03:57:14,027 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731556634014Writing region info on filesystem at 1731556634014Initializing all the Stores at 1731556634014Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556634014Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556634014Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556634015 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556634015Cleaning up temporary data from old regions at 1731556634023 (+8 ms)Running coprocessor post-open hooks at 1731556634026 (+3 ms)Region opened successfully at 1731556634027 (+1 ms) 2024-11-14T03:57:14,028 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731556633994 2024-11-14T03:57:14,031 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T03:57:14,031 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T03:57:14,032 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,38181,1731556632777 2024-11-14T03:57:14,033 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,38181,1731556632777, state=OPEN 2024-11-14T03:57:14,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:57:14,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:57:14,037 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6a989be77d39,38181,1731556632777 2024-11-14T03:57:14,037 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:57:14,037 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:57:14,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T03:57:14,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,38181,1731556632777 in 196 msec 2024-11-14T03:57:14,042 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T03:57:14,042 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-11-14T03:57:14,043 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:57:14,043 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T03:57:14,044 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:57:14,045 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,38181,1731556632777, seqNum=-1] 2024-11-14T03:57:14,045 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:57:14,046 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52699, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:57:14,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 672 msec 2024-11-14T03:57:14,051 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731556634051, completionTime=-1 2024-11-14T03:57:14,051 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T03:57:14,051 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T03:57:14,053 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T03:57:14,053 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731556694053 2024-11-14T03:57:14,053 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731556754053 2024-11-14T03:57:14,053 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-14T03:57:14,053 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,40453,1731556632726-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:14,053 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,40453,1731556632726-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:14,053 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,40453,1731556632726-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:14,053 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6a989be77d39:40453, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T03:57:14,054 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:14,054 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:14,055 DEBUG [master/6a989be77d39:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T03:57:14,057 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.246sec 2024-11-14T03:57:14,057 INFO [master/6a989be77d39:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T03:57:14,057 INFO [master/6a989be77d39:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T03:57:14,057 INFO [master/6a989be77d39:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T03:57:14,057 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T03:57:14,057 INFO [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T03:57:14,057 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,40453,1731556632726-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:57:14,057 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,40453,1731556632726-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T03:57:14,059 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T03:57:14,059 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T03:57:14,059 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,40453,1731556632726-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
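[editor's note] The startup sequence logged above (ZooKeeper quorum on 127.0.0.1:54988, HDFS on localhost:42027, one master and one regionserver, hbase:meta assignment, master initialization) is driven by the test harness class HBaseTestingUtil, which reports "Minicluster is up" a few lines below. A rough sketch under the assumption that the standard harness API applies; this is not the actual test code:

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Starts in-process ZooKeeper, a mini HDFS cluster, one HMaster and one
    // HRegionServer -- the components whose startup is recorded in this log.
    util.startMiniCluster(1);
    try {
      // Test logic would run here, typically against util.getConnection().
    } finally {
      // Tears down regionservers, master, HDFS and ZooKeeper.
      util.shutdownMiniCluster();
    }
  }
}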
2024-11-14T03:57:14,093 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dca8de9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:57:14,093 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6a989be77d39,40453,-1 for getting cluster id 2024-11-14T03:57:14,093 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T03:57:14,095 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6fa0eaa8-569d-49f1-91b6-db4e25762533' 2024-11-14T03:57:14,096 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T03:57:14,096 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6fa0eaa8-569d-49f1-91b6-db4e25762533" 2024-11-14T03:57:14,096 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b068343, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:57:14,096 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6a989be77d39,40453,-1] 2024-11-14T03:57:14,097 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T03:57:14,097 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:57:14,098 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35168, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T03:57:14,099 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7009eb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:57:14,100 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:57:14,101 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,38181,1731556632777, seqNum=-1] 2024-11-14T03:57:14,101 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:57:14,103 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35010, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:57:14,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6a989be77d39,40453,1731556632726 2024-11-14T03:57:14,105 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:57:14,109 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T03:57:14,110 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T03:57:14,112 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 6a989be77d39,40453,1731556632726 2024-11-14T03:57:14,112 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5de30b45 2024-11-14T03:57:14,112 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T03:57:14,114 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T03:57:14,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T03:57:14,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-14T03:57:14,115 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:57:14,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:14,125 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T03:57:14,126 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:14,126 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-14T03:57:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T03:57:14,128 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T03:57:14,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741835_1011 (size=405) 2024-11-14T03:57:14,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741835_1011 (size=405) 2024-11-14T03:57:14,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:14,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:14,545 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 60b27abd813b114b2fcbad5e2f02d67c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b 2024-11-14T03:57:14,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741836_1012 (size=88) 2024-11-14T03:57:14,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741836_1012 (size=88) 2024-11-14T03:57:14,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:57:14,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 60b27abd813b114b2fcbad5e2f02d67c, disabling compactions 
& flushes 2024-11-14T03:57:14,561 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:14,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:14,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. after waiting 0 ms 2024-11-14T03:57:14,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:14,561 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:14,561 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 60b27abd813b114b2fcbad5e2f02d67c: Waiting for close lock at 1731556634561Disabling compacts and flushes for region at 1731556634561Disabling writes for close at 1731556634561Writing region close event to WAL at 1731556634561Closed at 1731556634561 2024-11-14T03:57:14,564 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T03:57:14,565 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731556634564"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731556634564"}]},"ts":"1731556634564"} 2024-11-14T03:57:14,568 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T03:57:14,569 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T03:57:14,569 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556634569"}]},"ts":"1731556634569"} 2024-11-14T03:57:14,572 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-14T03:57:14,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=60b27abd813b114b2fcbad5e2f02d67c, ASSIGN}] 2024-11-14T03:57:14,574 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=60b27abd813b114b2fcbad5e2f02d67c, ASSIGN 2024-11-14T03:57:14,576 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=60b27abd813b114b2fcbad5e2f02d67c, ASSIGN; state=OFFLINE, location=6a989be77d39,38181,1731556632777; forceNewPlan=false, retain=false 2024-11-14T03:57:14,727 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=60b27abd813b114b2fcbad5e2f02d67c, regionState=OPENING, regionLocation=6a989be77d39,38181,1731556632777 2024-11-14T03:57:14,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=60b27abd813b114b2fcbad5e2f02d67c, ASSIGN because future has completed 2024-11-14T03:57:14,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 60b27abd813b114b2fcbad5e2f02d67c, server=6a989be77d39,38181,1731556632777}] 2024-11-14T03:57:14,888 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 
2024-11-14T03:57:14,888 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 60b27abd813b114b2fcbad5e2f02d67c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:57:14,889 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,889 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:57:14,889 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,889 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,891 INFO [StoreOpener-60b27abd813b114b2fcbad5e2f02d67c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,893 INFO [StoreOpener-60b27abd813b114b2fcbad5e2f02d67c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 60b27abd813b114b2fcbad5e2f02d67c columnFamilyName info 2024-11-14T03:57:14,893 DEBUG [StoreOpener-60b27abd813b114b2fcbad5e2f02d67c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:57:14,893 INFO [StoreOpener-60b27abd813b114b2fcbad5e2f02d67c-1 {}] regionserver.HStore(327): Store=60b27abd813b114b2fcbad5e2f02d67c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:57:14,894 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,895 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,895 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,895 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,896 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,898 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,900 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:57:14,901 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 60b27abd813b114b2fcbad5e2f02d67c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741998, jitterRate=-0.05650182068347931}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T03:57:14,901 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:57:14,901 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 60b27abd813b114b2fcbad5e2f02d67c: Running coprocessor pre-open hook at 1731556634890Writing region info on filesystem at 1731556634890Initializing all the Stores at 1731556634891 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556634891Cleaning up temporary data from old regions at 1731556634896 (+5 ms)Running coprocessor post-open hooks at 1731556634901 (+5 ms)Region opened successfully at 1731556634901 2024-11-14T03:57:14,902 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c., pid=6, masterSystemTime=1731556634883 2024-11-14T03:57:14,905 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:14,906 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:14,907 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=60b27abd813b114b2fcbad5e2f02d67c, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,38181,1731556632777 2024-11-14T03:57:14,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 60b27abd813b114b2fcbad5e2f02d67c, server=6a989be77d39,38181,1731556632777 because future has completed 2024-11-14T03:57:14,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T03:57:14,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 60b27abd813b114b2fcbad5e2f02d67c, server=6a989be77d39,38181,1731556632777 in 181 msec 2024-11-14T03:57:14,919 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T03:57:14,919 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=60b27abd813b114b2fcbad5e2f02d67c, ASSIGN in 341 msec 2024-11-14T03:57:14,921 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T03:57:14,921 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556634921"}]},"ts":"1731556634921"} 2024-11-14T03:57:14,924 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-14T03:57:14,925 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T03:57:14,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 810 msec 2024-11-14T03:57:15,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:15,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:16,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:16,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:16,377 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:57:16,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:16,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:57:17,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:17,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:18,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:18,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:19,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:19,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:19,427 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T03:57:19,428 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-14T03:57:20,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:20,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:20,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T03:57:20,874 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T03:57:20,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:57:20,875 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T03:57:20,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T03:57:20,875 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T03:57:20,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:20,875 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T03:57:21,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:21,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:22,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:22,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:23,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:23,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:24,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T03:57:24,142 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T03:57:24,142 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-14T03:57:24,146 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:24,146 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 
2024-11-14T03:57:24,149 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c., hostname=6a989be77d39,38181,1731556632777, seqNum=2] 2024-11-14T03:57:24,156 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:24,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:24,164 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T03:57:24,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-14T03:57:24,165 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T03:57:24,167 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T03:57:24,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:24,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:24,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38181 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-14T03:57:24,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:24,332 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 60b27abd813b114b2fcbad5e2f02d67c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T03:57:24,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/e20dc6e751e34840ae7d48a212ea693c is 1080, key is row0001/info:/1731556644150/Put/seqid=0 2024-11-14T03:57:24,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741837_1013 (size=6033) 2024-11-14T03:57:24,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741837_1013 (size=6033) 2024-11-14T03:57:24,777 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/e20dc6e751e34840ae7d48a212ea693c 2024-11-14T03:57:24,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/e20dc6e751e34840ae7d48a212ea693c as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/e20dc6e751e34840ae7d48a212ea693c 2024-11-14T03:57:24,792 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/e20dc6e751e34840ae7d48a212ea693c, entries=1, sequenceid=5, filesize=5.9 K 2024-11-14T03:57:24,794 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 60b27abd813b114b2fcbad5e2f02d67c in 462ms, sequenceid=5, compaction requested=false 2024-11-14T03:57:24,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush 
status journal for 60b27abd813b114b2fcbad5e2f02d67c: 2024-11-14T03:57:24,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:24,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-14T03:57:24,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-14T03:57:24,803 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-14T03:57:24,803 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 633 msec 2024-11-14T03:57:24,806 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 646 msec 2024-11-14T03:57:25,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:25,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:26,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:26,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:27,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:27,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:28,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:28,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:29,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:29,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:30,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:30,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:31,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:31,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:32,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:32,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:33,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:33,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:34,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:34,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:34,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-14T03:57:34,231 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T03:57:34,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:34,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:34,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-14T03:57:34,238 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T03:57:34,239 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T03:57:34,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T03:57:34,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38181 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-14T03:57:34,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 
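
Note on the repeated WARN entries above: they recur roughly once per second per WAL file because the Close-WAL-Writer thread keeps asking HDFS whether the old writer's file is closed, and each reflective isFileClosed() call fails with InvocationTargetException whose cause is IOException("Filesystem closed"), i.e. the DFSClient behind that FileSystem instance has already been shut down. The sketch below is only an illustration of that failure mode, not HBase's RecoverLeaseFSUtils code; the HDFS URI and path in main() are assumptions for demonstration.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch only: shows how a reflective isFileClosed() call on a
    // FileSystem whose client is already closed surfaces as
    // InvocationTargetException caused by IOException("Filesystem closed"),
    // the same cause chain as in the WARN stack traces above.
    public class IsFileClosedProbe {

      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          // DistributedFileSystem exposes isFileClosed(Path); resolve it reflectively
          // so the probe also works against FileSystem implementations that lack it.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          return false; // this FileSystem has no isFileClosed()
        } catch (IllegalAccessException | InvocationTargetException e) {
          // With a closed DFS client the cause is IOException("Filesystem closed");
          // a caller can only log the failure and retry, as the log above shows.
          return false;
        }
      }

      public static void main(String[] args) throws IOException {
        // Hypothetical setup: point at an HDFS namenode and close the client first
        // to reproduce the "Filesystem closed" condition.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43619"), conf);
        Path wal = new Path("/user/jenkins/example-wal");
        fs.close();
        System.out.println(isFileClosed(fs, wal)); // false; cause chain mirrors the log
      }
    }

Resolving the method reflectively instead of calling it directly is a common compatibility pattern when the target method may be missing on older Hadoop releases; the trade-off is that real errors arrive wrapped in InvocationTargetException, which is why the WARN lines report the wrapper rather than the underlying IOException directly.
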
2024-11-14T03:57:34,393 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 60b27abd813b114b2fcbad5e2f02d67c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T03:57:34,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/65e105da036e4b33ae197b5d0e7fc37c is 1080, key is row0002/info:/1731556654233/Put/seqid=0 2024-11-14T03:57:34,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741838_1014 (size=6033) 2024-11-14T03:57:34,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741838_1014 (size=6033) 2024-11-14T03:57:34,422 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/65e105da036e4b33ae197b5d0e7fc37c 2024-11-14T03:57:34,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/65e105da036e4b33ae197b5d0e7fc37c as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/65e105da036e4b33ae197b5d0e7fc37c 2024-11-14T03:57:34,449 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/65e105da036e4b33ae197b5d0e7fc37c, entries=1, sequenceid=9, filesize=5.9 K 2024-11-14T03:57:34,450 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 60b27abd813b114b2fcbad5e2f02d67c in 57ms, sequenceid=9, compaction requested=false 2024-11-14T03:57:34,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 60b27abd813b114b2fcbad5e2f02d67c: 2024-11-14T03:57:34,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 
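
For context on the flush records above: the two FLUSH operations (pid=7/8 and pid=9/10) are client-requested table flushes, logged on the master as "Client=jenkins//172.17.0.2 flush ..." and executed as a FlushTableProcedure with a FlushRegionProcedure subprocedure for the single region. A minimal client-side sketch that would issue such a request is shown below; the connection configuration is an assumption, and only the table name is taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Minimal sketch of the client side of the flush requests in the log above.
    // Admin.flush(TableName) asks the master to flush every region of the table;
    // in this build the master runs it as a FlushTableProcedure (pid=7, pid=9).
    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table =
              TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
          admin.flush(table); // returns once the flush procedure completes
        }
      }
    }

Each such request ends with one new HFile for the flushed 'info' store, matching the "Added ... entries=1, sequenceid=5/9, filesize=5.9 K" lines, after which the WAL entries up to that sequence id are no longer needed for recovery, which is the behaviour this log-rolling test is exercising.
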
2024-11-14T03:57:34,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-14T03:57:34,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-14T03:57:34,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-14T03:57:34,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 215 msec 2024-11-14T03:57:34,468 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 230 msec 2024-11-14T03:57:35,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:35,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:36,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:36,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:37,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:37,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:38,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:38,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:38,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 after 68056ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:57:38,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta after 68049ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T03:57:39,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:39,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:40,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:40,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:41,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:41,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:42,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:42,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:42,709 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T03:57:43,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:43,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:44,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:44,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:44,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-14T03:57:44,337 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T03:57:44,341 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C38181%2C1731556632777.1731556664341 2024-11-14T03:57:44,375 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:44,375 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:44,375 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:44,375 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:44,376 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:44,376 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556633582 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556664341 2024-11-14T03:57:44,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741833_1009 (size=5546) 2024-11-14T03:57:44,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741833_1009 (size=5546) 2024-11-14T03:57:44,412 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40181:40181),(127.0.0.1/127.0.0.1:35525:35525)] 2024-11-14T03:57:44,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:44,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:44,417 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T03:57:44,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-14T03:57:44,418 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T03:57:44,418 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T03:57:44,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38181 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-14T03:57:44,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:44,574 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 60b27abd813b114b2fcbad5e2f02d67c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T03:57:44,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/5af1618c3df342fa877d9629de11ca88 is 1080, key is row0003/info:/1731556664339/Put/seqid=0 2024-11-14T03:57:44,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741840_1016 (size=6033) 2024-11-14T03:57:44,658 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/5af1618c3df342fa877d9629de11ca88 2024-11-14T03:57:44,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741840_1016 (size=6033) 2024-11-14T03:57:44,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/5af1618c3df342fa877d9629de11ca88 as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/5af1618c3df342fa877d9629de11ca88 2024-11-14T03:57:44,689 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/5af1618c3df342fa877d9629de11ca88, entries=1, sequenceid=13, filesize=5.9 K 2024-11-14T03:57:44,691 INFO 
[RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 60b27abd813b114b2fcbad5e2f02d67c in 118ms, sequenceid=13, compaction requested=true 2024-11-14T03:57:44,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 60b27abd813b114b2fcbad5e2f02d67c: 2024-11-14T03:57:44,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:44,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-14T03:57:44,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-14T03:57:44,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-14T03:57:44,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 276 msec 2024-11-14T03:57:44,700 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 285 msec 2024-11-14T03:57:45,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:45,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:46,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:46,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:47,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:47,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:48,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:48,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:49,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:49,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:50,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:50,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:51,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:51,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:52,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:52,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:53,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:53,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:54,061 INFO [master/6a989be77d39:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T03:57:54,061 INFO [master/6a989be77d39:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T03:57:54,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:54,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:54,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-14T03:57:54,491 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T03:57:54,491 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:57:54,493 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T03:57:54,493 DEBUG [Time-limited test {}] regionserver.HStore(1541): 60b27abd813b114b2fcbad5e2f02d67c/info is initiating minor compaction (all files) 2024-11-14T03:57:54,493 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T03:57:54,493 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:57:54,493 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 60b27abd813b114b2fcbad5e2f02d67c/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:57:54,493 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/e20dc6e751e34840ae7d48a212ea693c, hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/65e105da036e4b33ae197b5d0e7fc37c, hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/5af1618c3df342fa877d9629de11ca88] into tmpdir=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp, totalSize=17.7 K 2024-11-14T03:57:54,494 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e20dc6e751e34840ae7d48a212ea693c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731556644150 2024-11-14T03:57:54,494 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 65e105da036e4b33ae197b5d0e7fc37c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731556654233 2024-11-14T03:57:54,495 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5af1618c3df342fa877d9629de11ca88, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731556664339 2024-11-14T03:57:54,508 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 60b27abd813b114b2fcbad5e2f02d67c#info#compaction#44 average throughput is 3.08 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:57:54,509 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/9d849472c3134469add23b209de993f0 is 1080, key is row0001/info:/1731556644150/Put/seqid=0 2024-11-14T03:57:54,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741841_1017 (size=8296) 2024-11-14T03:57:54,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741841_1017 (size=8296) 2024-11-14T03:57:54,923 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/9d849472c3134469add23b209de993f0 as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/9d849472c3134469add23b209de993f0 2024-11-14T03:57:54,930 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 60b27abd813b114b2fcbad5e2f02d67c/info of 60b27abd813b114b2fcbad5e2f02d67c into 9d849472c3134469add23b209de993f0(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T03:57:54,930 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 60b27abd813b114b2fcbad5e2f02d67c: 2024-11-14T03:57:54,933 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C38181%2C1731556632777.1731556674933 2024-11-14T03:57:54,946 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:54,946 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:54,946 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:54,946 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:54,946 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:57:54,947 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556664341 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556674933 2024-11-14T03:57:54,947 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35525:35525),(127.0.0.1/127.0.0.1:40181:40181)] 2024-11-14T03:57:54,948 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556664341 is not closed yet, will try archiving it next time 2024-11-14T03:57:54,948 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556633582 to hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/oldWALs/6a989be77d39%2C38181%2C1731556632777.1731556633582 2024-11-14T03:57:54,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741839_1015 (size=2520) 2024-11-14T03:57:54,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:54,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741839_1015 (size=2520) 2024-11-14T03:57:54,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:57:54,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-14T03:57:54,952 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T03:57:54,953 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T03:57:54,953 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T03:57:55,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38181 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-14T03:57:55,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 
2024-11-14T03:57:55,107 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 60b27abd813b114b2fcbad5e2f02d67c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T03:57:55,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/305431fbf0a54b99bdfb29c30ec3b877 is 1080, key is row0000/info:/1731556674932/Put/seqid=0 2024-11-14T03:57:55,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741843_1019 (size=6033) 2024-11-14T03:57:55,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741843_1019 (size=6033) 2024-11-14T03:57:55,119 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/305431fbf0a54b99bdfb29c30ec3b877 2024-11-14T03:57:55,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/305431fbf0a54b99bdfb29c30ec3b877 as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/305431fbf0a54b99bdfb29c30ec3b877 2024-11-14T03:57:55,131 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/305431fbf0a54b99bdfb29c30ec3b877, entries=1, sequenceid=18, filesize=5.9 K 2024-11-14T03:57:55,132 INFO [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 60b27abd813b114b2fcbad5e2f02d67c in 25ms, sequenceid=18, compaction requested=false 2024-11-14T03:57:55,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 60b27abd813b114b2fcbad5e2f02d67c: 2024-11-14T03:57:55,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 
2024-11-14T03:57:55,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-14T03:57:55,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-14T03:57:55,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-14T03:57:55,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-11-14T03:57:55,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-11-14T03:57:55,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:55,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:56,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:56,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:57,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:57,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:58,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:58,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:59,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:57:59,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:57:59,889 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 60b27abd813b114b2fcbad5e2f02d67c, had cached 0 bytes from a total of 14329 2024-11-14T03:58:00,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:00,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:01,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:01,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:02,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:02,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:03,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:03,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:04,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:04,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:04,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40453 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-14T03:58:04,962 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T03:58:04,966 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C38181%2C1731556632777.1731556684966 2024-11-14T03:58:05,022 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,022 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,022 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,029 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,030 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,030 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556674933 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556684966 2024-11-14T03:58:05,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741842_1018 (size=2026) 2024-11-14T03:58:05,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741842_1018 (size=2026) 2024-11-14T03:58:05,033 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/WALs/6a989be77d39,38181,1731556632777/6a989be77d39%2C38181%2C1731556632777.1731556664341 to hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/oldWALs/6a989be77d39%2C38181%2C1731556632777.1731556664341 2024-11-14T03:58:05,079 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35525:35525),(127.0.0.1/127.0.0.1:40181:40181)] 2024-11-14T03:58:05,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T03:58:05,079 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
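The repeated RecoverLeaseFSUtils warnings above come from the WAL close path trying to recover the lease on old WAL files through a DFSClient that has already been closed, so every probe fails with "Filesystem closed" and the warning recurs roughly once per second. Below is a sketch of the underlying recover-then-poll pattern against a live DistributedFileSystem, assuming an HDFS fs.defaultFS and a made-up WAL path; it is not the RecoverLeaseFSUtils implementation itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseSketch {
  // Ask the NameNode to recover the lease on a file another writer left open,
  // then poll isFileClosed() until recovery completes or we give up.
  public static boolean recoverLease(DistributedFileSystem dfs, Path walFile, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(walFile); // true if the file is already closed
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                   // the log above shows a ~1 s retry cadence
      recovered = dfs.isFileClosed(walFile); // throws IOException("Filesystem closed") if the client was shut down
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes fs.defaultFS points at an HDFS cluster
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      boolean ok = recoverLease((DistributedFileSystem) fs,
          new Path("/demo/WALs/example-wal"), 60_000L); // hypothetical WAL path
      System.out.println("lease recovered: " + ok);
    }
  }
}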
2024-11-14T03:58:05,080 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:58:05,080 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:58:05,080 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:58:05,080 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T03:58:05,080 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=916171182, stopped=false 2024-11-14T03:58:05,080 
INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6a989be77d39,40453,1731556632726 2024-11-14T03:58:05,080 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T03:58:05,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:58:05,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:05,089 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:58:05,090 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T03:58:05,090 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:58:05,090 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:58:05,090 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6a989be77d39,38181,1731556632777' ***** 2024-11-14T03:58:05,090 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T03:58:05,091 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T03:58:05,091 INFO [RS:0;6a989be77d39:38181 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T03:58:05,091 INFO [RS:0;6a989be77d39:38181 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T03:58:05,091 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(3091): Received CLOSE for 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:58:05,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:58:05,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:05,093 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T03:58:05,096 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:58:05,096 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:58:05,105 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(959): stopping server 6a989be77d39,38181,1731556632777 2024-11-14T03:58:05,105 INFO [RS:0;6a989be77d39:38181 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:58:05,106 INFO [RS:0;6a989be77d39:38181 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6a989be77d39:38181. 
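The ZooKeeper traffic above is how the shutdown fans out: the master deletes /hbase/running, each ZKWatcher receives a NodeDeleted event, and ZKUtil immediately re-sets a watch on the now-missing znode. Below is a sketch of that exists-watch pattern with the plain ZooKeeper client; the quorum string and znode path are copied from the log, while the session timeout and the endless sleep are placeholders.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatch {
  public static void main(String[] args) throws Exception {
    // Quorum and path as they appear in the log; the session timeout is an arbitrary choice.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54988", 30_000, event -> { });

    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted
            && "/hbase/running".equals(event.getPath())) {
          System.out.println("cluster shutdown requested: /hbase/running deleted");
        }
        try {
          // Watches are one-shot; re-arm on the (possibly missing) znode, as ZKUtil does above.
          zk.exists("/hbase/running", this);
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    };

    // exists() sets a watch whether or not the znode currently exists.
    zk.exists("/hbase/running", watcher);
    Thread.sleep(Long.MAX_VALUE); // keep the session alive for the demo
  }
}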
2024-11-14T03:58:05,106 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 60b27abd813b114b2fcbad5e2f02d67c, disabling compactions & flushes 2024-11-14T03:58:05,106 DEBUG [RS:0;6a989be77d39:38181 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:58:05,106 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:58:05,106 DEBUG [RS:0;6a989be77d39:38181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:58:05,106 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:58:05,106 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. after waiting 0 ms 2024-11-14T03:58:05,106 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:58:05,106 INFO [RS:0;6a989be77d39:38181 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T03:58:05,106 INFO [RS:0;6a989be77d39:38181 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T03:58:05,106 INFO [RS:0;6a989be77d39:38181 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T03:58:05,106 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T03:58:05,106 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 60b27abd813b114b2fcbad5e2f02d67c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T03:58:05,106 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T03:58:05,107 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 60b27abd813b114b2fcbad5e2f02d67c=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.} 2024-11-14T03:58:05,107 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:58:05,107 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:58:05,107 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:58:05,107 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:58:05,108 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:58:05,108 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:58:05,108 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-14T03:58:05,115 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/937293c443894401895e93f72040dc08 is 1080, key is row0001/info:/1731556684964/Put/seqid=0 2024-11-14T03:58:05,146 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/.tmp/info/cd1d2bc4c7aa48d58569251cbeeeae39 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c./info:regioninfo/1731556634906/Put/seqid=0 2024-11-14T03:58:05,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741845_1021 (size=6033) 2024-11-14T03:58:05,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741845_1021 (size=6033) 2024-11-14T03:58:05,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741846_1022 (size=7308) 2024-11-14T03:58:05,222 INFO 
[RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/.tmp/info/cd1d2bc4c7aa48d58569251cbeeeae39 2024-11-14T03:58:05,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741846_1022 (size=7308) 2024-11-14T03:58:05,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:05,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:05,307 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/.tmp/ns/abb748bcc3d34ece8c9851117243c849 is 43, key is default/ns:d/1731556634047/Put/seqid=0 2024-11-14T03:58:05,307 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:58:05,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741847_1023 (size=5153) 2024-11-14T03:58:05,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741847_1023 (size=5153) 2024-11-14T03:58:05,336 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/.tmp/ns/abb748bcc3d34ece8c9851117243c849 2024-11-14T03:58:05,373 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/.tmp/table/97e7cdc48c5544719c94a9eaf652e368 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731556634921/Put/seqid=0 2024-11-14T03:58:05,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741848_1024 (size=5508) 
2024-11-14T03:58:05,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741848_1024 (size=5508) 2024-11-14T03:58:05,401 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/.tmp/table/97e7cdc48c5544719c94a9eaf652e368 2024-11-14T03:58:05,417 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/.tmp/info/cd1d2bc4c7aa48d58569251cbeeeae39 as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/info/cd1d2bc4c7aa48d58569251cbeeeae39 2024-11-14T03:58:05,433 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/info/cd1d2bc4c7aa48d58569251cbeeeae39, entries=10, sequenceid=11, filesize=7.1 K 2024-11-14T03:58:05,434 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/.tmp/ns/abb748bcc3d34ece8c9851117243c849 as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/ns/abb748bcc3d34ece8c9851117243c849 2024-11-14T03:58:05,438 INFO [regionserver/6a989be77d39:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:58:05,442 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/ns/abb748bcc3d34ece8c9851117243c849, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T03:58:05,444 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/.tmp/table/97e7cdc48c5544719c94a9eaf652e368 as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/table/97e7cdc48c5544719c94a9eaf652e368 2024-11-14T03:58:05,462 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/table/97e7cdc48c5544719c94a9eaf652e368, entries=2, sequenceid=11, filesize=5.4 K 2024-11-14T03:58:05,465 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 357ms, sequenceid=11, compaction requested=false 2024-11-14T03:58:05,494 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T03:58:05,495 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:58:05,495 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:58:05,495 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556685107Running coprocessor pre-close hooks at 1731556685107Disabling compacts and flushes for region at 1731556685107Disabling writes for close at 1731556685108 (+1 ms)Obtaining lock to block concurrent updates at 1731556685108Preparing flush snapshotting stores in 1588230740 at 1731556685108Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731556685108Flushing stores of hbase:meta,,1.1588230740 at 1731556685110 (+2 ms)Flushing 1588230740/info: creating writer at 1731556685110Flushing 1588230740/info: appending metadata at 1731556685146 (+36 ms)Flushing 1588230740/info: closing flushed file at 1731556685146Flushing 1588230740/ns: creating writer at 1731556685251 (+105 ms)Flushing 1588230740/ns: appending metadata at 1731556685305 (+54 ms)Flushing 1588230740/ns: closing flushed file at 1731556685305Flushing 1588230740/table: creating writer at 1731556685348 (+43 ms)Flushing 1588230740/table: appending metadata at 1731556685373 (+25 ms)Flushing 1588230740/table: closing flushed file at 1731556685373Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@478ff1e6: reopening flushed file at 1731556685413 (+40 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@715819b7: reopening flushed file at 1731556685433 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7faaa922: reopening flushed file at 1731556685442 (+9 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 357ms, sequenceid=11, compaction requested=false at 1731556685465 (+23 ms)Writing region close event to WAL at 1731556685481 (+16 ms)Running coprocessor post-close hooks at 1731556685494 (+13 ms)Closed at 1731556685495 (+1 ms) 2024-11-14T03:58:05,495 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T03:58:05,507 DEBUG [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1351): Waiting on 60b27abd813b114b2fcbad5e2f02d67c 2024-11-14T03:58:05,530 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T03:58:05,530 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T03:58:05,569 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), 
to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/937293c443894401895e93f72040dc08 2024-11-14T03:58:05,577 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/.tmp/info/937293c443894401895e93f72040dc08 as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/937293c443894401895e93f72040dc08 2024-11-14T03:58:05,590 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/937293c443894401895e93f72040dc08, entries=1, sequenceid=22, filesize=5.9 K 2024-11-14T03:58:05,594 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 60b27abd813b114b2fcbad5e2f02d67c in 488ms, sequenceid=22, compaction requested=true 2024-11-14T03:58:05,613 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/e20dc6e751e34840ae7d48a212ea693c, hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/65e105da036e4b33ae197b5d0e7fc37c, hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/5af1618c3df342fa877d9629de11ca88] to archive 2024-11-14T03:58:05,615 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-14T03:58:05,619 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/e20dc6e751e34840ae7d48a212ea693c to hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/e20dc6e751e34840ae7d48a212ea693c 2024-11-14T03:58:05,621 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/65e105da036e4b33ae197b5d0e7fc37c to hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/65e105da036e4b33ae197b5d0e7fc37c 2024-11-14T03:58:05,623 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/5af1618c3df342fa877d9629de11ca88 to hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/info/5af1618c3df342fa877d9629de11ca88 2024-11-14T03:58:05,623 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6a989be77d39:40453 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-14T03:58:05,623 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e20dc6e751e34840ae7d48a212ea693c=6033, 65e105da036e4b33ae197b5d0e7fc37c=6033, 5af1618c3df342fa877d9629de11ca88=6033] 2024-11-14T03:58:05,629 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/60b27abd813b114b2fcbad5e2f02d67c/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-14T03:58:05,630 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:58:05,630 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 60b27abd813b114b2fcbad5e2f02d67c: Waiting for close lock at 1731556685105Running coprocessor pre-close hooks at 1731556685105Disabling compacts and flushes for region at 1731556685105Disabling writes for close at 1731556685106 (+1 ms)Obtaining lock to block concurrent updates at 1731556685106Preparing flush snapshotting stores in 60b27abd813b114b2fcbad5e2f02d67c at 1731556685106Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731556685106Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. at 1731556685107 (+1 ms)Flushing 60b27abd813b114b2fcbad5e2f02d67c/info: creating writer at 1731556685108 (+1 ms)Flushing 60b27abd813b114b2fcbad5e2f02d67c/info: appending metadata at 1731556685115 (+7 ms)Flushing 60b27abd813b114b2fcbad5e2f02d67c/info: closing flushed file at 1731556685115Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4823fd27: reopening flushed file at 1731556685576 (+461 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 60b27abd813b114b2fcbad5e2f02d67c in 488ms, sequenceid=22, compaction requested=true at 1731556685594 (+18 ms)Writing region close event to WAL at 1731556685624 (+30 ms)Running coprocessor post-close hooks at 1731556685630 (+6 ms)Closed at 1731556685630 2024-11-14T03:58:05,630 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731556634114.60b27abd813b114b2fcbad5e2f02d67c. 2024-11-14T03:58:05,707 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(976): stopping server 6a989be77d39,38181,1731556632777; all regions closed. 
2024-11-14T03:58:05,708 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,708 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,708 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,709 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,709 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:05,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741834_1010 (size=3306) 2024-11-14T03:58:05,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741834_1010 (size=3306) 2024-11-14T03:58:06,118 DEBUG [RS:0;6a989be77d39:38181 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/oldWALs 2024-11-14T03:58:06,118 INFO [RS:0;6a989be77d39:38181 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C38181%2C1731556632777.meta:.meta(num 1731556634003) 2024-11-14T03:58:06,119 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:06,119 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:06,119 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:06,119 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:06,120 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:06,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741844_1020 (size=1252) 2024-11-14T03:58:06,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741844_1020 (size=1252) 2024-11-14T03:58:06,127 DEBUG [RS:0;6a989be77d39:38181 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/oldWALs 2024-11-14T03:58:06,127 INFO [RS:0;6a989be77d39:38181 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C38181%2C1731556632777:(num 1731556684966) 2024-11-14T03:58:06,127 DEBUG [RS:0;6a989be77d39:38181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:58:06,127 INFO [RS:0;6a989be77d39:38181 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:58:06,127 INFO [RS:0;6a989be77d39:38181 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:58:06,127 INFO [RS:0;6a989be77d39:38181 {}] hbase.ChoreService(370): Chore service for: regionserver/6a989be77d39:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T03:58:06,127 INFO [RS:0;6a989be77d39:38181 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:58:06,127 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T03:58:06,128 INFO [RS:0;6a989be77d39:38181 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38181 2024-11-14T03:58:06,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:58:06,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6a989be77d39,38181,1731556632777 2024-11-14T03:58:06,131 INFO [RS:0;6a989be77d39:38181 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:58:06,139 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6a989be77d39,38181,1731556632777] 2024-11-14T03:58:06,141 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6a989be77d39,38181,1731556632777 already deleted, retry=false 2024-11-14T03:58:06,141 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6a989be77d39,38181,1731556632777 expired; onlineServers=0 2024-11-14T03:58:06,141 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6a989be77d39,40453,1731556632726' ***** 2024-11-14T03:58:06,141 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T03:58:06,141 INFO [M:0;6a989be77d39:40453 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:58:06,141 INFO [M:0;6a989be77d39:40453 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:58:06,141 DEBUG [M:0;6a989be77d39:40453 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T03:58:06,142 DEBUG [M:0;6a989be77d39:40453 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T03:58:06,142 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556633385 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556633385,5,FailOnTimeoutGroup] 2024-11-14T03:58:06,142 INFO [M:0;6a989be77d39:40453 {}] hbase.ChoreService(370): Chore service for: master/6a989be77d39:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T03:58:06,142 INFO [M:0;6a989be77d39:40453 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:58:06,142 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556633385 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556633385,5,FailOnTimeoutGroup] 2024-11-14T03:58:06,142 DEBUG [M:0;6a989be77d39:40453 {}] master.HMaster(1795): Stopping service threads 2024-11-14T03:58:06,142 INFO [M:0;6a989be77d39:40453 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T03:58:06,142 INFO [M:0;6a989be77d39:40453 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:58:06,142 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T03:58:06,142 INFO [M:0;6a989be77d39:40453 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T03:58:06,142 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T03:58:06,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T03:58:06,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:06,149 DEBUG [M:0;6a989be77d39:40453 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-14T03:58:06,149 DEBUG [M:0;6a989be77d39:40453 {}] master.ActiveMasterManager(353): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-14T03:58:06,150 INFO [M:0;6a989be77d39:40453 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/.lastflushedseqids 2024-11-14T03:58:06,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741849_1025 (size=130) 2024-11-14T03:58:06,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741849_1025 (size=130) 2024-11-14T03:58:06,166 INFO [M:0;6a989be77d39:40453 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T03:58:06,166 INFO [M:0;6a989be77d39:40453 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T03:58:06,166 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:58:06,166 INFO [M:0;6a989be77d39:40453 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:58:06,166 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:58:06,166 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:58:06,166 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T03:58:06,167 INFO [M:0;6a989be77d39:40453 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-14T03:58:06,195 DEBUG [M:0;6a989be77d39:40453 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/777699976faf40dab8dcd65eb01ac1f0 is 82, key is hbase:meta,,1/info:regioninfo/1731556634031/Put/seqid=0 2024-11-14T03:58:06,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741850_1026 (size=5672) 2024-11-14T03:58:06,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741850_1026 (size=5672) 2024-11-14T03:58:06,220 INFO [M:0;6a989be77d39:40453 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/777699976faf40dab8dcd65eb01ac1f0 2024-11-14T03:58:06,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:06,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:06,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:58:06,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38181-0x100ef645d310001, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:58:06,239 INFO [RS:0;6a989be77d39:38181 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:58:06,239 INFO [RS:0;6a989be77d39:38181 {}] regionserver.HRegionServer(1031): Exiting; stopping=6a989be77d39,38181,1731556632777; zookeeper connection closed. 
2024-11-14T03:58:06,249 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6ed55931 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6ed55931 2024-11-14T03:58:06,250 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T03:58:06,251 DEBUG [M:0;6a989be77d39:40453 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f7aa7e48a6d04e61a6c1053a35612b4b is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731556634927/Put/seqid=0 2024-11-14T03:58:06,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741851_1027 (size=7824) 2024-11-14T03:58:06,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741851_1027 (size=7824) 2024-11-14T03:58:06,278 INFO [M:0;6a989be77d39:40453 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f7aa7e48a6d04e61a6c1053a35612b4b 2024-11-14T03:58:06,285 INFO [M:0;6a989be77d39:40453 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f7aa7e48a6d04e61a6c1053a35612b4b 2024-11-14T03:58:06,309 DEBUG [M:0;6a989be77d39:40453 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8f565fcee85e469cb607e9aef33f053d is 69, key is 6a989be77d39,38181,1731556632777/rs:state/1731556633419/Put/seqid=0 2024-11-14T03:58:06,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741852_1028 (size=5156) 2024-11-14T03:58:06,326 INFO [M:0;6a989be77d39:40453 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8f565fcee85e469cb607e9aef33f053d 2024-11-14T03:58:06,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741852_1028 (size=5156) 2024-11-14T03:58:06,374 DEBUG [M:0;6a989be77d39:40453 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c21bea08f50349de827643de2c4bb1a6 is 52, key is load_balancer_on/state:d/1731556634107/Put/seqid=0 2024-11-14T03:58:06,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741853_1029 (size=5056) 2024-11-14T03:58:06,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741853_1029 (size=5056) 2024-11-14T03:58:06,384 INFO [M:0;6a989be77d39:40453 {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c21bea08f50349de827643de2c4bb1a6 2024-11-14T03:58:06,391 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/777699976faf40dab8dcd65eb01ac1f0 as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/777699976faf40dab8dcd65eb01ac1f0 2024-11-14T03:58:06,397 INFO [M:0;6a989be77d39:40453 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/777699976faf40dab8dcd65eb01ac1f0, entries=8, sequenceid=121, filesize=5.5 K 2024-11-14T03:58:06,399 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f7aa7e48a6d04e61a6c1053a35612b4b as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f7aa7e48a6d04e61a6c1053a35612b4b 2024-11-14T03:58:06,405 INFO [M:0;6a989be77d39:40453 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f7aa7e48a6d04e61a6c1053a35612b4b 2024-11-14T03:58:06,405 INFO [M:0;6a989be77d39:40453 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f7aa7e48a6d04e61a6c1053a35612b4b, entries=14, sequenceid=121, filesize=7.6 K 2024-11-14T03:58:06,406 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8f565fcee85e469cb607e9aef33f053d as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8f565fcee85e469cb607e9aef33f053d 2024-11-14T03:58:06,418 INFO [M:0;6a989be77d39:40453 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8f565fcee85e469cb607e9aef33f053d, entries=1, sequenceid=121, filesize=5.0 K 2024-11-14T03:58:06,419 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c21bea08f50349de827643de2c4bb1a6 as hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c21bea08f50349de827643de2c4bb1a6 2024-11-14T03:58:06,425 INFO [M:0;6a989be77d39:40453 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42027/user/jenkins/test-data/b7053cab-8c86-72a5-cfe9-d3244c3f373b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c21bea08f50349de827643de2c4bb1a6, entries=1, sequenceid=121, filesize=4.9 K 2024-11-14T03:58:06,426 INFO [M:0;6a989be77d39:40453 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 260ms, sequenceid=121, compaction requested=false 2024-11-14T03:58:06,427 INFO [M:0;6a989be77d39:40453 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:58:06,428 DEBUG [M:0;6a989be77d39:40453 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556686166Disabling compacts and flushes for region at 1731556686166Disabling writes for close at 1731556686166Obtaining lock to block concurrent updates at 1731556686167 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731556686167Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1731556686167Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731556686169 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731556686169Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731556686193 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731556686193Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731556686227 (+34 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731556686250 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731556686250Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731556686285 (+35 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731556686309 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731556686309Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731556686342 (+33 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731556686373 (+31 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731556686373Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c9e0d0a: reopening flushed file at 1731556686389 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@442d4115: reopening flushed file at 1731556686397 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15f51c96: reopening flushed file at 1731556686405 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e151102: reopening flushed file at 1731556686418 (+13 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 260ms, sequenceid=121, compaction requested=false at 1731556686426 (+8 ms)Writing region close event to WAL at 1731556686427 (+1 ms)Closed at 1731556686427 2024-11-14T03:58:06,428 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:06,428 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:06,429 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:06,429 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-14T03:58:06,429 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:58:06,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33565 is added to blk_1073741830_1006 (size=53047) 2024-11-14T03:58:06,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39077 is added to blk_1073741830_1006 (size=53047) 2024-11-14T03:58:06,432 INFO [M:0;6a989be77d39:40453 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T03:58:06,432 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T03:58:06,432 INFO [M:0;6a989be77d39:40453 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40453 2024-11-14T03:58:06,432 INFO [M:0;6a989be77d39:40453 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:58:06,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:58:06,535 INFO [M:0;6a989be77d39:40453 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:58:06,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40453-0x100ef645d310000, quorum=127.0.0.1:54988, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:58:06,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6bd1231f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:58:06,539 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fa194e5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:58:06,539 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:58:06,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ecaedd6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:58:06,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7101d128{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.log.dir/,STOPPED} 2024-11-14T03:58:06,543 WARN [BP-1861591780-172.17.0.2-1731556632011 heartbeating to localhost/127.0.0.1:42027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:58:06,543 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:58:06,543 WARN [BP-1861591780-172.17.0.2-1731556632011 heartbeating to localhost/127.0.0.1:42027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1861591780-172.17.0.2-1731556632011 (Datanode Uuid e4dc1f39-5d39-4004-afb5-8f56cc289eaa) service to localhost/127.0.0.1:42027 2024-11-14T03:58:06,543 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:58:06,544 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/data/data3/current/BP-1861591780-172.17.0.2-1731556632011 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:58:06,544 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/data/data4/current/BP-1861591780-172.17.0.2-1731556632011 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:58:06,544 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:58:06,552 WARN [BP-1861591780-172.17.0.2-1731556632011 heartbeating to localhost/127.0.0.1:42027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1861591780-172.17.0.2-1731556632011 (Datanode Uuid 1ea4d249-0825-402b-a1a1-9299b27eb1e9) service to localhost/127.0.0.1:42027 2024-11-14T03:58:06,552 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/data/data1/current/BP-1861591780-172.17.0.2-1731556632011 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:58:06,553 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/cluster_95fb4cef-9078-8c69-dd29-4b4d4bcaf7b9/data/data2/current/BP-1861591780-172.17.0.2-1731556632011 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:58:06,554 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78617008{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:58:06,554 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fa18b90{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:58:06,554 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:58:06,554 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a3c93e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:58:06,555 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@521506bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.log.dir/,STOPPED} 2024-11-14T03:58:06,558 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:58:06,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22e47138{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:58:06,567 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@232381c8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:58:06,567 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:58:06,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b7f365e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:58:06,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56673fe9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.log.dir/,STOPPED} 2024-11-14T03:58:06,577 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T03:58:06,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T03:58:06,614 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 181) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:42027 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:42027 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:42027 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=481 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=573 (was 436) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4639 (was 5463) 2024-11-14T03:58:06,628 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=481, MaxFileDescriptor=1048576, SystemLoadAverage=573, ProcessCount=11, AvailableMemoryMB=4640 2024-11-14T03:58:06,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T03:58:06,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.log.dir so I do NOT create it in target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243 2024-11-14T03:58:06,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/545efa3b-a12c-308a-a61d-a0efcca4d4aa/hadoop.tmp.dir so I do NOT create it in target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243 2024-11-14T03:58:06,629 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715, deleteOnExit=true 2024-11-14T03:58:06,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T03:58:06,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/test.cache.data in system properties and HBase conf 2024-11-14T03:58:06,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.log.dir in system properties and HBase conf 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T03:58:06,630 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T03:58:06,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T03:58:06,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:58:06,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:58:06,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T03:58:06,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/nfs.dump.dir in system properties and HBase conf 2024-11-14T03:58:06,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/java.io.tmpdir in system properties and HBase conf 2024-11-14T03:58:06,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:58:06,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T03:58:06,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T03:58:06,649 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:58:06,719 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:58:06,724 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:58:06,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:58:06,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:58:06,726 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:58:06,727 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:58:06,727 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11e6950{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:58:06,728 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28903d6d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:58:06,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a0ef025{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/java.io.tmpdir/jetty-localhost-41257-hadoop-hdfs-3_4_1-tests_jar-_-any-4596383552767280448/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:58:06,895 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47a053ae{HTTP/1.1, (http/1.1)}{localhost:41257} 2024-11-14T03:58:06,895 INFO [Time-limited test {}] server.Server(415): Started @242907ms 2024-11-14T03:58:06,914 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:58:07,092 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:58:07,095 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:58:07,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:58:07,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:58:07,096 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:58:07,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cb7e6ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:58:07,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79b422de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:58:07,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:07,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:07,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d6e0cf8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/java.io.tmpdir/jetty-localhost-35667-hadoop-hdfs-3_4_1-tests_jar-_-any-6329051364315252819/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:58:07,257 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34e466bb{HTTP/1.1, (http/1.1)}{localhost:35667} 2024-11-14T03:58:07,257 INFO [Time-limited test {}] server.Server(415): Started @243269ms 2024-11-14T03:58:07,258 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:58:07,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:58:07,393 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:58:07,400 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:58:07,401 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:58:07,401 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:58:07,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43909889{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:58:07,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@652d6e37{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:58:07,437 WARN [Thread-1946 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/data/data1/current/BP-1496562693-172.17.0.2-1731556686657/current, will proceed with Du for space computation calculation, 2024-11-14T03:58:07,444 WARN [Thread-1947 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/data/data2/current/BP-1496562693-172.17.0.2-1731556686657/current, will proceed with Du for space computation calculation, 2024-11-14T03:58:07,496 WARN [Thread-1925 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:58:07,513 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4f2e583a493a134 with lease ID 0x77488e374c2deb00: Processing first storage report for DS-fc63bdea-06f9-4d53-b785-69dcf4f111fe from datanode DatanodeRegistration(127.0.0.1:40871, datanodeUuid=ea66442f-9a68-49e2-ab4e-c98b8063e361, infoPort=42019, infoSecurePort=0, ipcPort=37361, storageInfo=lv=-57;cid=testClusterID;nsid=579703618;c=1731556686657) 2024-11-14T03:58:07,514 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4f2e583a493a134 with lease ID 0x77488e374c2deb00: from storage DS-fc63bdea-06f9-4d53-b785-69dcf4f111fe node DatanodeRegistration(127.0.0.1:40871, datanodeUuid=ea66442f-9a68-49e2-ab4e-c98b8063e361, infoPort=42019, infoSecurePort=0, ipcPort=37361, storageInfo=lv=-57;cid=testClusterID;nsid=579703618;c=1731556686657), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:58:07,514 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4f2e583a493a134 with lease ID 0x77488e374c2deb00: Processing first storage report for DS-beac96e3-34f3-433c-be92-3ddddbeb2410 from datanode DatanodeRegistration(127.0.0.1:40871, datanodeUuid=ea66442f-9a68-49e2-ab4e-c98b8063e361, infoPort=42019, infoSecurePort=0, ipcPort=37361, storageInfo=lv=-57;cid=testClusterID;nsid=579703618;c=1731556686657) 2024-11-14T03:58:07,514 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4f2e583a493a134 with lease ID 0x77488e374c2deb00: from storage DS-beac96e3-34f3-433c-be92-3ddddbeb2410 node DatanodeRegistration(127.0.0.1:40871, datanodeUuid=ea66442f-9a68-49e2-ab4e-c98b8063e361, infoPort=42019, infoSecurePort=0, ipcPort=37361, storageInfo=lv=-57;cid=testClusterID;nsid=579703618;c=1731556686657), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:58:07,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f25c1cb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/java.io.tmpdir/jetty-localhost-39203-hadoop-hdfs-3_4_1-tests_jar-_-any-3715083854681170352/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:58:07,555 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cfa2328{HTTP/1.1, (http/1.1)}{localhost:39203} 2024-11-14T03:58:07,555 INFO [Time-limited test {}] server.Server(415): Started @243568ms 2024-11-14T03:58:07,557 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T03:58:07,710 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/data/data3/current/BP-1496562693-172.17.0.2-1731556686657/current, will proceed with Du for space computation calculation, 2024-11-14T03:58:07,718 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/data/data4/current/BP-1496562693-172.17.0.2-1731556686657/current, will proceed with Du for space computation calculation, 2024-11-14T03:58:07,770 WARN [Thread-1961 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T03:58:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e13cea65a2e22b3 with lease ID 0x77488e374c2deb01: Processing first storage report for DS-36fbfff0-ada1-48d1-846d-6b27c2ae0401 from datanode DatanodeRegistration(127.0.0.1:38303, datanodeUuid=df3874e3-3dfe-49c6-b711-a17e6716c3c8, infoPort=37239, infoSecurePort=0, ipcPort=38039, storageInfo=lv=-57;cid=testClusterID;nsid=579703618;c=1731556686657) 2024-11-14T03:58:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e13cea65a2e22b3 with lease ID 0x77488e374c2deb01: from storage DS-36fbfff0-ada1-48d1-846d-6b27c2ae0401 node DatanodeRegistration(127.0.0.1:38303, datanodeUuid=df3874e3-3dfe-49c6-b711-a17e6716c3c8, infoPort=37239, infoSecurePort=0, ipcPort=38039, storageInfo=lv=-57;cid=testClusterID;nsid=579703618;c=1731556686657), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:58:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e13cea65a2e22b3 with lease ID 0x77488e374c2deb01: Processing first storage report for DS-6b35c643-8cc6-4bd0-ad20-bba0b509ae01 from datanode DatanodeRegistration(127.0.0.1:38303, datanodeUuid=df3874e3-3dfe-49c6-b711-a17e6716c3c8, infoPort=37239, infoSecurePort=0, ipcPort=38039, storageInfo=lv=-57;cid=testClusterID;nsid=579703618;c=1731556686657) 2024-11-14T03:58:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e13cea65a2e22b3 with lease ID 0x77488e374c2deb01: from storage DS-6b35c643-8cc6-4bd0-ad20-bba0b509ae01 node DatanodeRegistration(127.0.0.1:38303, datanodeUuid=df3874e3-3dfe-49c6-b711-a17e6716c3c8, infoPort=37239, infoSecurePort=0, ipcPort=38039, storageInfo=lv=-57;cid=testClusterID;nsid=579703618;c=1731556686657), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:58:07,833 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243 2024-11-14T03:58:07,846 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/zookeeper_0, clientPort=64410, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T03:58:07,847 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64410 2024-11-14T03:58:07,847 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:58:07,848 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:58:07,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:58:07,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:58:07,903 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454 with version=8 2024-11-14T03:58:07,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/hbase-staging 2024-11-14T03:58:07,906 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:58:07,906 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:58:07,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:58:07,907 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:58:07,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:58:07,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:58:07,907 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T03:58:07,907 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:58:07,908 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46343 2024-11-14T03:58:07,909 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46343 connecting to ZooKeeper ensemble=127.0.0.1:64410 2024-11-14T03:58:07,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:463430x0, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:58:07,926 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46343-0x100ef6534890000 connected 2024-11-14T03:58:08,079 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:58:08,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:58:08,099 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:58:08,099 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454, hbase.cluster.distributed=false 2024-11-14T03:58:08,101 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:58:08,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46343 2024-11-14T03:58:08,137 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46343 2024-11-14T03:58:08,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46343 2024-11-14T03:58:08,194 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46343 2024-11-14T03:58:08,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46343 2024-11-14T03:58:08,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:08,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:08,247 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:58:08,247 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:58:08,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:58:08,248 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:58:08,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:58:08,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:58:08,248 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T03:58:08,248 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:58:08,257 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40421 2024-11-14T03:58:08,259 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40421 connecting to ZooKeeper ensemble=127.0.0.1:64410 2024-11-14T03:58:08,260 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:58:08,262 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:58:08,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404210x0, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:58:08,269 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:404210x0, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:58:08,270 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T03:58:08,273 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40421-0x100ef6534890001 connected 
2024-11-14T03:58:08,293 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T03:58:08,294 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T03:58:08,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:58:08,329 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40421 2024-11-14T03:58:08,337 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40421 2024-11-14T03:58:08,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40421 2024-11-14T03:58:08,389 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40421 2024-11-14T03:58:08,399 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40421 2024-11-14T03:58:08,422 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6a989be77d39:46343 2024-11-14T03:58:08,443 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6a989be77d39,46343,1731556687906 2024-11-14T03:58:08,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:58:08,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:58:08,447 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6a989be77d39,46343,1731556687906 2024-11-14T03:58:08,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T03:58:08,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:08,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:08,451 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T03:58:08,454 INFO 
[master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6a989be77d39,46343,1731556687906 from backup master directory 2024-11-14T03:58:08,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6a989be77d39,46343,1731556687906 2024-11-14T03:58:08,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:58:08,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:58:08,457 WARN [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T03:58:08,457 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6a989be77d39,46343,1731556687906 2024-11-14T03:58:08,468 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/hbase.id] with ID: 6340b022-5e0a-458d-b2b1-de6081e0faed 2024-11-14T03:58:08,468 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/.tmp/hbase.id 2024-11-14T03:58:08,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:58:08,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:58:08,511 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/.tmp/hbase.id]:[hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/hbase.id] 2024-11-14T03:58:08,535 INFO [master/6a989be77d39:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:58:08,535 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T03:58:08,536 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-14T03:58:08,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:08,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:08,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:58:08,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:58:08,990 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:58:08,991 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T03:58:08,993 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:58:09,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:58:09,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:58:09,029 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store 2024-11-14T03:58:09,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:58:09,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:58:09,065 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:58:09,065 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:58:09,065 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:58:09,065 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:58:09,065 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:58:09,065 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:58:09,065 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T03:58:09,065 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556689065Disabling compacts and flushes for region at 1731556689065Disabling writes for close at 1731556689065Writing region close event to WAL at 1731556689065Closed at 1731556689065 2024-11-14T03:58:09,066 WARN [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/.initializing 2024-11-14T03:58:09,066 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/WALs/6a989be77d39,46343,1731556687906 2024-11-14T03:58:09,086 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C46343%2C1731556687906, suffix=, logDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/WALs/6a989be77d39,46343,1731556687906, archiveDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/oldWALs, maxLogs=10 2024-11-14T03:58:09,086 INFO [master/6a989be77d39:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C46343%2C1731556687906.1731556689086 2024-11-14T03:58:09,117 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/WALs/6a989be77d39,46343,1731556687906/6a989be77d39%2C46343%2C1731556687906.1731556689086 2024-11-14T03:58:09,125 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37239:37239),(127.0.0.1/127.0.0.1:42019:42019)] 2024-11-14T03:58:09,126 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:58:09,126 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:58:09,126 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,126 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T03:58:09,135 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:09,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:58:09,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T03:58:09,137 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:09,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:58:09,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T03:58:09,139 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:09,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:58:09,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T03:58:09,177 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:09,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:58:09,179 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,180 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,181 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,190 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,190 DEBUG [master/6a989be77d39:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,191 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T03:58:09,192 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:58:09,195 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:58:09,195 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762160, jitterRate=-0.030863821506500244}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T03:58:09,196 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731556689126Initializing all the Stores at 1731556689127 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556689127Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556689133 (+6 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556689133Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556689133Cleaning up temporary data from old regions at 1731556689190 (+57 ms)Region opened successfully at 1731556689196 (+6 ms) 2024-11-14T03:58:09,197 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T03:58:09,206 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ea92091, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:58:09,207 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T03:58:09,208 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T03:58:09,208 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T03:58:09,208 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T03:58:09,209 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T03:58:09,209 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T03:58:09,210 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T03:58:09,222 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T03:58:09,224 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T03:58:09,225 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T03:58:09,226 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T03:58:09,227 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T03:58:09,228 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T03:58:09,228 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T03:58:09,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:09,234 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T03:58:09,235 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T03:58:09,236 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T03:58:09,241 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T03:58:09,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:09,246 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T03:58:09,249 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T03:58:09,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:58:09,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:09,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:58:09,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:09,256 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6a989be77d39,46343,1731556687906, sessionid=0x100ef6534890000, setting cluster-up flag (Was=false) 2024-11-14T03:58:09,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:09,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:09,267 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T03:58:09,270 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,46343,1731556687906 2024-11-14T03:58:09,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:09,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:09,282 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T03:58:09,283 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,46343,1731556687906 2024-11-14T03:58:09,299 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T03:58:09,312 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(746): ClusterId : 6340b022-5e0a-458d-b2b1-de6081e0faed 2024-11-14T03:58:09,312 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T03:58:09,314 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T03:58:09,314 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T03:58:09,314 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-14T03:58:09,315 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6a989be77d39,46343,1731556687906 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T03:58:09,317 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:58:09,317 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:58:09,317 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:58:09,317 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:58:09,317 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6a989be77d39:0, corePoolSize=10, maxPoolSize=10 2024-11-14T03:58:09,317 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,317 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:58:09,317 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,318 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T03:58:09,318 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T03:58:09,333 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:58:09,333 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T03:58:09,339 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T03:58:09,340 DEBUG [RS:0;6a989be77d39:40421 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@559209ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:58:09,341 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:09,341 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new 
hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T03:58:09,346 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731556719346 2024-11-14T03:58:09,346 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T03:58:09,347 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T03:58:09,347 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T03:58:09,347 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T03:58:09,347 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T03:58:09,347 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T03:58:09,352 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-14T03:58:09,381 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T03:58:09,382 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T03:58:09,382 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T03:58:09,385 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T03:58:09,385 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T03:58:09,390 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556689385,5,FailOnTimeoutGroup] 2024-11-14T03:58:09,406 DEBUG [RS:0;6a989be77d39:40421 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6a989be77d39:40421 2024-11-14T03:58:09,406 INFO [RS:0;6a989be77d39:40421 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T03:58:09,406 INFO [RS:0;6a989be77d39:40421 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T03:58:09,406 DEBUG [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T03:58:09,408 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,46343,1731556687906 with port=40421, startcode=1731556688247 2024-11-14T03:58:09,409 DEBUG [RS:0;6a989be77d39:40421 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T03:58:09,410 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556689390,5,FailOnTimeoutGroup] 2024-11-14T03:58:09,410 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,410 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T03:58:09,410 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,410 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-14T03:58:09,456 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42743, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T03:58:09,457 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46343 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6a989be77d39,40421,1731556688247 2024-11-14T03:58:09,457 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46343 {}] master.ServerManager(517): Registering regionserver=6a989be77d39,40421,1731556688247 2024-11-14T03:58:09,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:58:09,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:58:09,460 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T03:58:09,460 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454 2024-11-14T03:58:09,460 DEBUG [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454 2024-11-14T03:58:09,460 DEBUG [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38151 2024-11-14T03:58:09,460 DEBUG [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T03:58:09,466 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:58:09,470 DEBUG [RS:0;6a989be77d39:40421 {}] zookeeper.ZKUtil(111): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6a989be77d39,40421,1731556688247 2024-11-14T03:58:09,470 WARN [RS:0;6a989be77d39:40421 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T03:58:09,470 INFO [RS:0;6a989be77d39:40421 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:58:09,470 DEBUG [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247 2024-11-14T03:58:09,478 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6a989be77d39,40421,1731556688247] 2024-11-14T03:58:09,500 INFO [RS:0;6a989be77d39:40421 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T03:58:09,543 INFO [RS:0;6a989be77d39:40421 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T03:58:09,545 INFO [RS:0;6a989be77d39:40421 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T03:58:09,545 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:58:09,549 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T03:58:09,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:58:09,554 INFO [RS:0;6a989be77d39:40421 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T03:58:09,554 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
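The MemStoreFlusher line above reports globalMemStoreLimit=880 M and a low mark of 836 M. A small worked example, assuming the low mark is the limit scaled by the hbase.regionserver.global.memstore.size.lower.limit fraction (assumed default 0.95); with that assumption the numbers line up with the log:

    public class MemStoreLowMarkSketch {
      public static void main(String[] args) {
        long globalLimitMb = 880;          // globalMemStoreLimit from the log above
        double lowerLimitFraction = 0.95;  // assumed default of hbase.regionserver.global.memstore.size.lower.limit
        long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);
        System.out.println(lowMarkMb);     // 836, matching globalMemStoreLimitLowMark in the log
      }
    }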
2024-11-14T03:58:09,554 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,554 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,554 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,554 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,554 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,554 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:58:09,555 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,555 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,555 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,555 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,555 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,555 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:58:09,555 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:58:09,555 DEBUG [RS:0;6a989be77d39:40421 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:58:09,578 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,578 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,578 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,578 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
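Each "Starting executor service ... corePoolSize=..., maxPoolSize=..." line above corresponds to a dedicated thread pool per event type. A rough JDK analogue of one such pool (the names are illustrative, not the region server's internal classes):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
      public static void main(String[] args) {
        // Analogue of RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1: a single worker draining a queue.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("open region task"));
        openRegionPool.shutdown();
      }
    }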
2024-11-14T03:58:09,578 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,578 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,40421,1731556688247-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:58:09,617 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T03:58:09,617 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,40421,1731556688247-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,618 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,618 INFO [RS:0;6a989be77d39:40421 {}] regionserver.Replication(171): 6a989be77d39,40421,1731556688247 started 2024-11-14T03:58:09,655 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:09,655 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(1482): Serving as 6a989be77d39,40421,1731556688247, RpcServer on 6a989be77d39/172.17.0.2:40421, sessionid=0x100ef6534890001 2024-11-14T03:58:09,655 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T03:58:09,655 DEBUG [RS:0;6a989be77d39:40421 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6a989be77d39,40421,1731556688247 2024-11-14T03:58:09,655 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,40421,1731556688247' 2024-11-14T03:58:09,655 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T03:58:09,656 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T03:58:09,656 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T03:58:09,656 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T03:58:09,657 DEBUG [RS:0;6a989be77d39:40421 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6a989be77d39,40421,1731556688247 2024-11-14T03:58:09,657 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,40421,1731556688247' 2024-11-14T03:58:09,657 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T03:58:09,658 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T03:58:09,659 DEBUG [RS:0;6a989be77d39:40421 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T03:58:09,659 INFO [RS:0;6a989be77d39:40421 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T03:58:09,659 INFO [RS:0;6a989be77d39:40421 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T03:58:09,775 INFO [RS:0;6a989be77d39:40421 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C40421%2C1731556688247, suffix=, logDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247, archiveDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/oldWALs, maxLogs=32 2024-11-14T03:58:09,775 INFO [RS:0;6a989be77d39:40421 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C40421%2C1731556688247.1731556689775 2024-11-14T03:58:09,832 INFO [RS:0;6a989be77d39:40421 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.1731556689775 2024-11-14T03:58:09,857 DEBUG [RS:0;6a989be77d39:40421 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37239:37239),(127.0.0.1/127.0.0.1:42019:42019)] 2024-11-14T03:58:09,948 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:58:09,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:58:09,951 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:58:09,951 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:09,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:58:09,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:58:09,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:58:09,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:09,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:58:09,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:58:09,968 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:58:09,968 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:09,969 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:58:09,969 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:58:09,971 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:58:09,971 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:09,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:58:09,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:58:09,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740 2024-11-14T03:58:09,973 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740 2024-11-14T03:58:09,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:58:09,974 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:58:09,975 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:58:09,980 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:58:09,986 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:58:09,987 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809631, jitterRate=0.029499709606170654}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:58:09,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731556689948Initializing all the Stores at 1731556689949 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556689949Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556689949Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556689949Instantiating store for column family {NAME 
=> 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556689949Cleaning up temporary data from old regions at 1731556689974 (+25 ms)Region opened successfully at 1731556689988 (+14 ms) 2024-11-14T03:58:09,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:58:09,988 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:58:09,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:58:09,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:58:09,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:58:10,008 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:58:10,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556689988Disabling compacts and flushes for region at 1731556689988Disabling writes for close at 1731556689988Writing region close event to WAL at 1731556690008 (+20 ms)Closed at 1731556690008 2024-11-14T03:58:10,011 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:58:10,011 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T03:58:10,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T03:58:10,013 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:58:10,016 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T03:58:10,168 DEBUG [6a989be77d39:46343 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T03:58:10,169 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6a989be77d39,40421,1731556688247 2024-11-14T03:58:10,172 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,40421,1731556688247, state=OPENING 2024-11-14T03:58:10,175 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T03:58:10,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:10,180 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:58:10,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:58:10,183 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:58:10,183 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:58:10,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,40421,1731556688247}] 2024-11-14T03:58:10,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:10,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:10,351 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T03:58:10,373 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44719, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T03:58:10,380 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T03:58:10,380 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:58:10,395 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C40421%2C1731556688247.meta, suffix=.meta, logDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247, archiveDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/oldWALs, maxLogs=32 2024-11-14T03:58:10,396 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C40421%2C1731556688247.meta.1731556690396.meta 2024-11-14T03:58:10,421 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.meta.1731556690396.meta 2024-11-14T03:58:10,477 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37239:37239),(127.0.0.1/127.0.0.1:42019:42019)] 2024-11-14T03:58:10,489 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:58:10,490 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T03:58:10,490 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T03:58:10,490 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
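The priority 536870911 printed when MultiRowMutationEndpoint is loaded above is Coprocessor.PRIORITY_SYSTEM (Integer.MAX_VALUE / 4). A minimal sketch of attaching a coprocessor at that priority to a table descriptor, assuming the CoprocessorDescriptorBuilder API (the table name 'demo' is hypothetical):

    import org.apache.hadoop.hbase.Coprocessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorPrioritySketch {
      public static void main(String[] args) throws Exception {
        // 536870911 in the log equals Coprocessor.PRIORITY_SYSTEM (Integer.MAX_VALUE / 4).
        System.out.println(Coprocessor.PRIORITY_SYSTEM);
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setCoprocessor(CoprocessorDescriptorBuilder
                .newBuilder("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .setPriority(Coprocessor.PRIORITY_SYSTEM)
                .build())
            .build();
        System.out.println(td);
      }
    }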
2024-11-14T03:58:10,490 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T03:58:10,490 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:58:10,490 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T03:58:10,491 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T03:58:10,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:58:10,522 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:58:10,522 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:10,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:58:10,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:58:10,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:58:10,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:10,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:58:10,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:58:10,526 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:58:10,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:10,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:58:10,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:58:10,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,534 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:58:10,534 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:10,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:58:10,535 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:58:10,536 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740 2024-11-14T03:58:10,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:10,537 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740 2024-11-14T03:58:10,539 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:58:10,539 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:58:10,540 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
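The FlushLargeStoresPolicy line above falls back to region.getMemStoreFlushHeapSize divided by the number of families when no per-family lower bound is configured; with the four hbase:meta families (info, ns, rep_barrier, table) that yields the 16.0 M shown, i.e. the flushSizeLowerBound=16777216 reported when the region opened. A small worked example of that fallback arithmetic (the 64 MB flush heap size is inferred from the log, not configured here):

    public class PerFamilyFlushBoundSketch {
      public static void main(String[] args) {
        long perFamilyLowerBound = 16L * 1024 * 1024;  // 16777216 = the "16.0 M" fallback in the log
        int families = 4;                              // info, ns, rep_barrier, table
        long impliedFlushHeapSize = perFamilyLowerBound * families;
        System.out.println(impliedFlushHeapSize);      // 67108864, i.e. a 64 MB memstore flush heap size
      }
    }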
2024-11-14T03:58:10,542 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:58:10,544 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812997, jitterRate=0.03377962112426758}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:58:10,544 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T03:58:10,545 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731556690491Writing region info on filesystem at 1731556690491Initializing all the Stores at 1731556690492 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556690492Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556690498 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556690498Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556690498Cleaning up temporary data from old regions at 1731556690539 (+41 ms)Running coprocessor post-open hooks at 1731556690544 (+5 ms)Region opened successfully at 1731556690545 (+1 ms) 2024-11-14T03:58:10,547 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731556690350 2024-11-14T03:58:10,554 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T03:58:10,554 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T03:58:10,559 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=6a989be77d39,40421,1731556688247 2024-11-14T03:58:10,562 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,40421,1731556688247, state=OPEN 2024-11-14T03:58:10,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:58:10,578 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:58:10,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:58:10,581 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6a989be77d39,40421,1731556688247 2024-11-14T03:58:10,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:58:10,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T03:58:10,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,40421,1731556688247 in 398 msec 2024-11-14T03:58:10,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T03:58:10,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-14T03:58:10,622 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:58:10,622 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T03:58:10,626 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:58:10,626 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,40421,1731556688247, seqNum=-1] 2024-11-14T03:58:10,627 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:58:10,629 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35979, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:58:10,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3350 sec 2024-11-14T03:58:10,638 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731556690638, completionTime=-1 2024-11-14T03:58:10,638 INFO 
[master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T03:58:10,638 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T03:58:10,641 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T03:58:10,641 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731556750641 2024-11-14T03:58:10,641 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731556810641 2024-11-14T03:58:10,641 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T03:58:10,641 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,46343,1731556687906-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:10,641 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,46343,1731556687906-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:10,641 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,46343,1731556687906-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:10,641 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6a989be77d39:46343, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:10,641 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:10,642 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:10,645 DEBUG [master/6a989be77d39:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T03:58:10,647 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.190sec 2024-11-14T03:58:10,647 INFO [master/6a989be77d39:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T03:58:10,647 INFO [master/6a989be77d39:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T03:58:10,647 INFO [master/6a989be77d39:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T03:58:10,647 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
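Quota support is reported disabled above (MasterQuotaManager here, RegionServerRpcQuotaManager earlier). A minimal sketch of turning it on, assuming the hbase.quota.enabled switch; like the other properties this would normally live in hbase-site.xml rather than be set in code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true);  // assumed switch consulted by the quota managers
        System.out.println(conf.getBoolean("hbase.quota.enabled", false));
      }
    }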
2024-11-14T03:58:10,647 INFO [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T03:58:10,647 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,46343,1731556687906-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:58:10,647 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,46343,1731556687906-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T03:58:10,651 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T03:58:10,651 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T03:58:10,651 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,46343,1731556687906-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:58:10,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9caeb33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:58:10,730 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6a989be77d39,46343,-1 for getting cluster id 2024-11-14T03:58:10,730 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T03:58:10,739 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6340b022-5e0a-458d-b2b1-de6081e0faed' 2024-11-14T03:58:10,739 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T03:58:10,739 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6340b022-5e0a-458d-b2b1-de6081e0faed" 2024-11-14T03:58:10,740 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9eb32c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:58:10,740 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6a989be77d39,46343,-1] 2024-11-14T03:58:10,740 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T03:58:10,740 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:58:10,742 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57104, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T03:58:10,743 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c3bfe4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:58:10,744 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:58:10,745 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,40421,1731556688247, seqNum=-1] 2024-11-14T03:58:10,745 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:58:10,746 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53488, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:58:10,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6a989be77d39,46343,1731556687906 2024-11-14T03:58:10,751 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:58:10,754 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T03:58:10,754 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T03:58:10,756 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 6a989be77d39,46343,1731556687906 2024-11-14T03:58:10,756 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@37746974 2024-11-14T03:58:10,756 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T03:58:10,759 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T03:58:10,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46343 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T03:58:10,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46343 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-14T03:58:10,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46343 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:58:10,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46343 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-14T03:58:10,764 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T03:58:10,764 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:10,765 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46343 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-14T03:58:10,765 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T03:58:10,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46343 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T03:58:10,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741835_1011 (size=381) 2024-11-14T03:58:10,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741835_1011 (size=381) 2024-11-14T03:58:10,819 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 817cba3f933a29fea91e92b5cfc2ff1c, NAME => 'TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454 2024-11-14T03:58:10,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741836_1012 (size=64) 2024-11-14T03:58:10,869 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; 
hotProtect now disable 2024-11-14T03:58:10,869 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 817cba3f933a29fea91e92b5cfc2ff1c, disabling compactions & flushes 2024-11-14T03:58:10,870 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:10,870 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:10,870 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. after waiting 0 ms 2024-11-14T03:58:10,870 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:10,870 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:10,870 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 817cba3f933a29fea91e92b5cfc2ff1c: Waiting for close lock at 1731556690869Disabling compacts and flushes for region at 1731556690869Disabling writes for close at 1731556690870 (+1 ms)Writing region close event to WAL at 1731556690870Closed at 1731556690870 2024-11-14T03:58:10,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741836_1012 (size=64) 2024-11-14T03:58:10,876 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T03:58:10,881 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731556690876"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731556690876"}]},"ts":"1731556690876"} 2024-11-14T03:58:10,884 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T03:58:10,885 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T03:58:10,886 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556690885"}]},"ts":"1731556690885"} 2024-11-14T03:58:10,888 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-14T03:58:10,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=817cba3f933a29fea91e92b5cfc2ff1c, ASSIGN}] 2024-11-14T03:58:10,890 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=817cba3f933a29fea91e92b5cfc2ff1c, ASSIGN 2024-11-14T03:58:10,891 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=817cba3f933a29fea91e92b5cfc2ff1c, ASSIGN; state=OFFLINE, location=6a989be77d39,40421,1731556688247; forceNewPlan=false, retain=false 2024-11-14T03:58:11,044 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=817cba3f933a29fea91e92b5cfc2ff1c, regionState=OPENING, regionLocation=6a989be77d39,40421,1731556688247 2024-11-14T03:58:11,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=817cba3f933a29fea91e92b5cfc2ff1c, ASSIGN because future has completed 2024-11-14T03:58:11,053 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247}] 2024-11-14T03:58:11,054 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:58:11,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:11,177 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T03:58:11,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:11,241 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 
2024-11-14T03:58:11,241 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 817cba3f933a29fea91e92b5cfc2ff1c, NAME => 'TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:58:11,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:11,248 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,248 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:58:11,248 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,248 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,255 INFO [StoreOpener-817cba3f933a29fea91e92b5cfc2ff1c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,258 INFO [StoreOpener-817cba3f933a29fea91e92b5cfc2ff1c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 817cba3f933a29fea91e92b5cfc2ff1c columnFamilyName info 2024-11-14T03:58:11,273 DEBUG [StoreOpener-817cba3f933a29fea91e92b5cfc2ff1c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:11,274 INFO [StoreOpener-817cba3f933a29fea91e92b5cfc2ff1c-1 {}] regionserver.HStore(327): Store=817cba3f933a29fea91e92b5cfc2ff1c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:58:11,274 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,275 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,276 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,277 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,277 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,292 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,313 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:58:11,317 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 817cba3f933a29fea91e92b5cfc2ff1c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846555, jitterRate=0.07645086944103241}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T03:58:11,317 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:11,318 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 817cba3f933a29fea91e92b5cfc2ff1c: Running coprocessor pre-open hook at 1731556691248Writing region info on filesystem at 1731556691248Initializing all the Stores at 1731556691252 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556691252Cleaning up temporary data from old regions at 1731556691277 (+25 ms)Running coprocessor post-open hooks at 1731556691317 (+40 ms)Region opened successfully at 1731556691318 (+1 ms) 2024-11-14T03:58:11,324 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., pid=6, masterSystemTime=1731556691222 2024-11-14T03:58:11,328 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=817cba3f933a29fea91e92b5cfc2ff1c, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,40421,1731556688247 2024-11-14T03:58:11,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 because future has completed 2024-11-14T03:58:11,337 DEBUG 
[RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:11,337 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:11,350 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T03:58:11,350 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 in 294 msec 2024-11-14T03:58:11,368 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T03:58:11,369 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731556691368"}]},"ts":"1731556691368"} 2024-11-14T03:58:11,369 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T03:58:11,370 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=817cba3f933a29fea91e92b5cfc2ff1c, ASSIGN in 461 msec 2024-11-14T03:58:11,372 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-14T03:58:11,373 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T03:58:11,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 613 msec 2024-11-14T03:58:12,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:12,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:13,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:13,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:14,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:14,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:15,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:15,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:15,993 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:58:15,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:15,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:15,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:15,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:15,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:15,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,062 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:16,089 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T03:58:16,090 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-14T03:58:16,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:16,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:17,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:17,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:18,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:18,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:19,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:19,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:20,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:20,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:20,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46343 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T03:58:20,821 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-14T03:58:20,821 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-14T03:58:20,824 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-14T03:58:20,824 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:20,827 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2] 2024-11-14T03:58:20,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:20,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 817cba3f933a29fea91e92b5cfc2ff1c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:58:20,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/0d08c74b6d0845049f141c2fe780896a is 1080, key is row0001/info:/1731556700828/Put/seqid=0 2024-11-14T03:58:20,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741837_1013 (size=12509) 2024-11-14T03:58:20,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741837_1013 (size=12509) 2024-11-14T03:58:20,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/0d08c74b6d0845049f141c2fe780896a 2024-11-14T03:58:20,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-14T03:58:20,875 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T03:58:20,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/0d08c74b6d0845049f141c2fe780896a as 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/0d08c74b6d0845049f141c2fe780896a 2024-11-14T03:58:20,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-14T03:58:20,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/0d08c74b6d0845049f141c2fe780896a, entries=7, sequenceid=11, filesize=12.2 K 2024-11-14T03:58:20,890 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 817cba3f933a29fea91e92b5cfc2ff1c in 51ms, sequenceid=11, compaction requested=false 2024-11-14T03:58:20,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 817cba3f933a29fea91e92b5cfc2ff1c: 2024-11-14T03:58:20,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53488 deadline: 1731556710887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 2024-11-14T03:58:20,894 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T03:58:20,895 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T03:58:20,895 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2 because the exception is null or not the one we care about 2024-11-14T03:58:21,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:21,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:22,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:22,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:23,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:23,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:24,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:24,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:25,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:25,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:26,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:26,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:27,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:27,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:28,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:28,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:29,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:29,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:30,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:30,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:31,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:31,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 817cba3f933a29fea91e92b5cfc2ff1c 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-14T03:58:31,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/8b16b57ad6fd490dac11ba26a135293f is 1080, key is row0008/info:/1731556700840/Put/seqid=0 2024-11-14T03:58:31,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741838_1014 (size=29761) 2024-11-14T03:58:31,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/8b16b57ad6fd490dac11ba26a135293f 2024-11-14T03:58:31,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741838_1014 (size=29761) 2024-11-14T03:58:31,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/8b16b57ad6fd490dac11ba26a135293f as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/8b16b57ad6fd490dac11ba26a135293f 2024-11-14T03:58:31,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/8b16b57ad6fd490dac11ba26a135293f, entries=23, sequenceid=37, filesize=29.1 K 2024-11-14T03:58:31,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 817cba3f933a29fea91e92b5cfc2ff1c in 103ms, sequenceid=37, compaction requested=false 2024-11-14T03:58:31,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 817cba3f933a29fea91e92b5cfc2ff1c: 2024-11-14T03:58:31,112 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-14T03:58:31,112 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:31,112 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/8b16b57ad6fd490dac11ba26a135293f because midkey is the same as first or last row 2024-11-14T03:58:31,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:31,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:32,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:32,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:33,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:33,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 817cba3f933a29fea91e92b5cfc2ff1c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:58:33,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/bc7558eaef684ba8aec89a1d4723b81a is 1080, key is row0031/info:/1731556711010/Put/seqid=0 2024-11-14T03:58:33,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741839_1015 (size=12509) 2024-11-14T03:58:33,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741839_1015 (size=12509) 2024-11-14T03:58:33,160 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/bc7558eaef684ba8aec89a1d4723b81a 2024-11-14T03:58:33,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/bc7558eaef684ba8aec89a1d4723b81a as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/bc7558eaef684ba8aec89a1d4723b81a 2024-11-14T03:58:33,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/bc7558eaef684ba8aec89a1d4723b81a, entries=7, sequenceid=47, filesize=12.2 K 2024-11-14T03:58:33,195 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 817cba3f933a29fea91e92b5cfc2ff1c in 116ms, sequenceid=47, compaction requested=true 2024-11-14T03:58:33,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 817cba3f933a29fea91e92b5cfc2ff1c: 2024-11-14T03:58:33,196 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-14T03:58:33,196 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:33,196 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/8b16b57ad6fd490dac11ba26a135293f because midkey is the same as first or last row 2024-11-14T03:58:33,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:33,209 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 817cba3f933a29fea91e92b5cfc2ff1c:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:58:33,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:58:33,209 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:58:33,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 817cba3f933a29fea91e92b5cfc2ff1c 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-14T03:58:33,211 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T03:58:33,211 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1541): 817cba3f933a29fea91e92b5cfc2ff1c/info is initiating minor compaction (all files) 2024-11-14T03:58:33,211 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 817cba3f933a29fea91e92b5cfc2ff1c/info in TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:33,213 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/0d08c74b6d0845049f141c2fe780896a, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/8b16b57ad6fd490dac11ba26a135293f, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/bc7558eaef684ba8aec89a1d4723b81a] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp, totalSize=53.5 K 2024-11-14T03:58:33,214 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0d08c74b6d0845049f141c2fe780896a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731556700828 2024-11-14T03:58:33,218 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8b16b57ad6fd490dac11ba26a135293f, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731556700840 2024-11-14T03:58:33,218 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting bc7558eaef684ba8aec89a1d4723b81a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731556711010 2024-11-14T03:58:33,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/c1da810c46084a19a6fd8d58f661e90b is 1080, key is row0038/info:/1731556713083/Put/seqid=0 
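[editorial note] The entries above show HBase's normal write-path housekeeping for region 817cba3f933a29fea91e92b5cfc2ff1c: a flush is requested, the flushed HFile is committed under info/, the split policy checks the store size, and ExploringCompactionPolicy then selects three store files (53.5 K total) for a minor compaction. As a minimal sketch only, the same flush and compaction can be requested explicitly through the public HBase client Admin API; this is not how the test drives them (the flushes above come from memstore pressure), and it assumes an hbase-site.xml for this cluster is on the classpath. The table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: cluster connection settings come from an hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling"); // table name from the log above
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);   // asks the region server to flush memstores, as MemStoreFlusher does above
      admin.compact(table); // queues a compaction, analogous to the minor compaction selected above
    }
  }
}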
2024-11-14T03:58:33,253 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 817cba3f933a29fea91e92b5cfc2ff1c#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:58:33,253 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/e8bd7815866140ae92eb5ab609eb99e9 is 1080, key is row0001/info:/1731556700828/Put/seqid=0 2024-11-14T03:58:33,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:33,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:33,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741840_1016 (size=26530) 2024-11-14T03:58:33,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741840_1016 (size=26530) 2024-11-14T03:58:33,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/c1da810c46084a19a6fd8d58f661e90b 2024-11-14T03:58:33,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/c1da810c46084a19a6fd8d58f661e90b as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/c1da810c46084a19a6fd8d58f661e90b 2024-11-14T03:58:33,339 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/c1da810c46084a19a6fd8d58f661e90b, entries=20, sequenceid=70, filesize=25.9 K 2024-11-14T03:58:33,345 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=7.36 KB/7532 for 817cba3f933a29fea91e92b5cfc2ff1c in 
136ms, sequenceid=70, compaction requested=false 2024-11-14T03:58:33,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 817cba3f933a29fea91e92b5cfc2ff1c: 2024-11-14T03:58:33,346 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=79.4 K, sizeToCheck=16.0 K 2024-11-14T03:58:33,346 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:33,346 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/8b16b57ad6fd490dac11ba26a135293f because midkey is the same as first or last row 2024-11-14T03:58:33,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741841_1017 (size=44978) 2024-11-14T03:58:33,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741841_1017 (size=44978) 2024-11-14T03:58:33,393 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/e8bd7815866140ae92eb5ab609eb99e9 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e8bd7815866140ae92eb5ab609eb99e9 2024-11-14T03:58:33,410 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 817cba3f933a29fea91e92b5cfc2ff1c/info of 817cba3f933a29fea91e92b5cfc2ff1c into e8bd7815866140ae92eb5ab609eb99e9(size=43.9 K), total size for store is 69.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
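[editorial note] The Close-WAL-Writer-0 warnings recurring through this stretch (roughly once per second, for the two old WAL files under hdfs://localhost:43619/) come from RecoverLeaseFSUtils probing whether the old writer's file is already closed. The stack traces show the probe reaching DistributedFileSystem.isFileClosed through reflection (Method.invoke), and because the DFSClient behind that FileSystem handle has already been shut down, each probe fails with IOException: Filesystem closed; reflection reports that wrapped in an InvocationTargetException, which is why every WARN prints "InvocationTargetException: null" with the real cause nested beneath it. The sketch below shows the general reflective probe-and-unwrap pattern that produces this shape; it is an illustration only, not the actual RecoverLeaseFSUtils source.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only -- not the RecoverLeaseFSUtils implementation. isFileClosed(Path) is not
// part of the base FileSystem contract, so it is looked up reflectively; when the underlying
// DFSClient has been closed, the call throws IOException("Filesystem closed"), which reflection
// delivers wrapped in an InvocationTargetException -- the exact shape of the repeated WARNs here.
final class IsFileClosedProbe {
  static boolean isFileClosed(FileSystem fs, Path path) throws IOException {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // this FileSystem implementation does not expose isFileClosed
    } catch (InvocationTargetException e) {
      if (e.getCause() instanceof IOException) {
        throw (IOException) e.getCause(); // e.g. "Filesystem closed"
      }
      throw new IOException(e.getCause());
    }
  }
}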
2024-11-14T03:58:33,410 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 817cba3f933a29fea91e92b5cfc2ff1c: 2024-11-14T03:58:33,411 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., storeName=817cba3f933a29fea91e92b5cfc2ff1c/info, priority=13, startTime=1731556713196; duration=0sec 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.8 K, sizeToCheck=16.0 K 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e8bd7815866140ae92eb5ab609eb99e9 because midkey is the same as first or last row 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.8 K, sizeToCheck=16.0 K 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e8bd7815866140ae92eb5ab609eb99e9 because midkey is the same as first or last row 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.8 K, sizeToCheck=16.0 K 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e8bd7815866140ae92eb5ab609eb99e9 because midkey is the same as first or last row 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:58:33,411 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 817cba3f933a29fea91e92b5cfc2ff1c:info 2024-11-14T03:58:34,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:34,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:35,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:35,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 817cba3f933a29fea91e92b5cfc2ff1c 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-14T03:58:35,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/399b2d3d6dae4480905f1544506d3418 is 1080, key is row0058/info:/1731556713212/Put/seqid=0 2024-11-14T03:58:35,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:35,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:35,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741842_1018 (size=13586) 2024-11-14T03:58:35,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-14T03:58:35,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53488 deadline: 1731556725331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 2024-11-14T03:58:35,334 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T03:58:35,334 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T03:58:35,334 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2 because the exception is null or not the one we care about 2024-11-14T03:58:35,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/399b2d3d6dae4480905f1544506d3418 2024-11-14T03:58:35,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741842_1018 (size=13586) 2024-11-14T03:58:35,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/399b2d3d6dae4480905f1544506d3418 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/399b2d3d6dae4480905f1544506d3418 2024-11-14T03:58:35,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/399b2d3d6dae4480905f1544506d3418, entries=8, sequenceid=82, filesize=13.3 K 2024-11-14T03:58:35,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=22.07 KB/22596 for 817cba3f933a29fea91e92b5cfc2ff1c in 127ms, sequenceid=82, compaction requested=true 2024-11-14T03:58:35,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 817cba3f933a29fea91e92b5cfc2ff1c: 2024-11-14T03:58:35,365 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-14T03:58:35,365 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:35,365 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e8bd7815866140ae92eb5ab609eb99e9 because midkey is the same as first or last row 2024-11-14T03:58:35,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 817cba3f933a29fea91e92b5cfc2ff1c:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:58:35,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:58:35,365 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:58:35,367 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T03:58:35,367 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1541): 817cba3f933a29fea91e92b5cfc2ff1c/info is initiating minor compaction (all files) 2024-11-14T03:58:35,367 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 817cba3f933a29fea91e92b5cfc2ff1c/info in TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:35,367 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e8bd7815866140ae92eb5ab609eb99e9, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/c1da810c46084a19a6fd8d58f661e90b, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/399b2d3d6dae4480905f1544506d3418] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp, totalSize=83.1 K 2024-11-14T03:58:35,369 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting e8bd7815866140ae92eb5ab609eb99e9, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731556700828 2024-11-14T03:58:35,370 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting c1da810c46084a19a6fd8d58f661e90b, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=70, earliestPutTs=1731556713083 2024-11-14T03:58:35,371 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 399b2d3d6dae4480905f1544506d3418, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731556713212 2024-11-14T03:58:35,402 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 817cba3f933a29fea91e92b5cfc2ff1c#info#compaction#60 average throughput is 16.67 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:58:35,403 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/73cf57353b9e4064a5f5a88acddddad3 is 1080, key is row0001/info:/1731556700828/Put/seqid=0 2024-11-14T03:58:35,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741843_1019 (size=75378) 2024-11-14T03:58:35,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741843_1019 (size=75378) 2024-11-14T03:58:35,459 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/73cf57353b9e4064a5f5a88acddddad3 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/73cf57353b9e4064a5f5a88acddddad3 2024-11-14T03:58:35,472 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 817cba3f933a29fea91e92b5cfc2ff1c/info of 817cba3f933a29fea91e92b5cfc2ff1c into 73cf57353b9e4064a5f5a88acddddad3(size=73.6 K), total size for store is 73.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T03:58:35,472 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 817cba3f933a29fea91e92b5cfc2ff1c: 2024-11-14T03:58:35,472 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., storeName=817cba3f933a29fea91e92b5cfc2ff1c/info, priority=13, startTime=1731556715365; duration=0sec 2024-11-14T03:58:35,472 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-14T03:58:35,472 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:35,472 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-14T03:58:35,473 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:35,473 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-14T03:58:35,473 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T03:58:35,477 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:58:35,477 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:58:35,477 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 817cba3f933a29fea91e92b5cfc2ff1c:info 2024-11-14T03:58:35,479 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46343 {}] assignment.AssignmentManager(1363): Split request from 6a989be77d39,40421,1731556688247, parent={ENCODED => 817cba3f933a29fea91e92b5cfc2ff1c, NAME => 'TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-14T03:58:35,486 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46343 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=6a989be77d39,40421,1731556688247 2024-11-14T03:58:35,495 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46343 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=817cba3f933a29fea91e92b5cfc2ff1c, daughterA=504095cd6401df6643acc3f612334809, daughterB=a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:35,497 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=817cba3f933a29fea91e92b5cfc2ff1c, daughterA=504095cd6401df6643acc3f612334809, daughterB=a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:35,497 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=817cba3f933a29fea91e92b5cfc2ff1c, daughterA=504095cd6401df6643acc3f612334809, daughterB=a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:35,497 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=817cba3f933a29fea91e92b5cfc2ff1c, daughterA=504095cd6401df6643acc3f612334809, daughterB=a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:35,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=817cba3f933a29fea91e92b5cfc2ff1c, UNASSIGN}] 2024-11-14T03:58:35,508 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=817cba3f933a29fea91e92b5cfc2ff1c, UNASSIGN 2024-11-14T03:58:35,511 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=817cba3f933a29fea91e92b5cfc2ff1c, regionState=CLOSING, regionLocation=6a989be77d39,40421,1731556688247 2024-11-14T03:58:35,515 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=8, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=817cba3f933a29fea91e92b5cfc2ff1c, UNASSIGN 2024-11-14T03:58:35,516 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-14T03:58:35,516 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247}] 2024-11-14T03:58:35,675 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:35,675 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-14T03:58:35,677 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 817cba3f933a29fea91e92b5cfc2ff1c, disabling compactions & flushes 2024-11-14T03:58:35,677 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:35,677 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 2024-11-14T03:58:35,677 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. after waiting 0 ms 2024-11-14T03:58:35,677 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 
2024-11-14T03:58:35,677 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 817cba3f933a29fea91e92b5cfc2ff1c 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-11-14T03:58:35,691 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/e1b1590619a74d7d98a33c05e94a467a is 1080, key is row0066/info:/1731556715240/Put/seqid=0 2024-11-14T03:58:35,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741844_1020 (size=27607) 2024-11-14T03:58:35,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741844_1020 (size=27607) 2024-11-14T03:58:36,147 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=107 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/e1b1590619a74d7d98a33c05e94a467a 2024-11-14T03:58:36,182 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/.tmp/info/e1b1590619a74d7d98a33c05e94a467a as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e1b1590619a74d7d98a33c05e94a467a 2024-11-14T03:58:36,220 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e1b1590619a74d7d98a33c05e94a467a, entries=21, sequenceid=107, filesize=27.0 K 2024-11-14T03:58:36,222 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=0 B/0 for 817cba3f933a29fea91e92b5cfc2ff1c in 544ms, sequenceid=107, compaction requested=false 2024-11-14T03:58:36,230 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/0d08c74b6d0845049f141c2fe780896a, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/8b16b57ad6fd490dac11ba26a135293f, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e8bd7815866140ae92eb5ab609eb99e9, 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/bc7558eaef684ba8aec89a1d4723b81a, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/c1da810c46084a19a6fd8d58f661e90b, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/399b2d3d6dae4480905f1544506d3418] to archive 2024-11-14T03:58:36,237 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T03:58:36,257 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/0d08c74b6d0845049f141c2fe780896a to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/0d08c74b6d0845049f141c2fe780896a 2024-11-14T03:58:36,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:36,269 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/8b16b57ad6fd490dac11ba26a135293f to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/8b16b57ad6fd490dac11ba26a135293f 2024-11-14T03:58:36,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:36,280 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e8bd7815866140ae92eb5ab609eb99e9 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e8bd7815866140ae92eb5ab609eb99e9 2024-11-14T03:58:36,293 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/bc7558eaef684ba8aec89a1d4723b81a to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/bc7558eaef684ba8aec89a1d4723b81a 2024-11-14T03:58:36,305 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/c1da810c46084a19a6fd8d58f661e90b to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/c1da810c46084a19a6fd8d58f661e90b 2024-11-14T03:58:36,322 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/399b2d3d6dae4480905f1544506d3418 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/399b2d3d6dae4480905f1544506d3418 2024-11-14T03:58:36,377 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/recovered.edits/110.seqid, newMaxSeqId=110, maxSeqId=1 2024-11-14T03:58:36,379 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 
2024-11-14T03:58:36,380 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 817cba3f933a29fea91e92b5cfc2ff1c: Waiting for close lock at 1731556715677Running coprocessor pre-close hooks at 1731556715677Disabling compacts and flushes for region at 1731556715677Disabling writes for close at 1731556715677Obtaining lock to block concurrent updates at 1731556715677Preparing flush snapshotting stores in 817cba3f933a29fea91e92b5cfc2ff1c at 1731556715677Finished memstore snapshotting TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., syncing WAL and waiting on mvcc, flushsize=dataSize=22596, getHeapSize=24432, getOffHeapSize=0, getCellsCount=21 at 1731556715678 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. at 1731556715679 (+1 ms)Flushing 817cba3f933a29fea91e92b5cfc2ff1c/info: creating writer at 1731556715679Flushing 817cba3f933a29fea91e92b5cfc2ff1c/info: appending metadata at 1731556715690 (+11 ms)Flushing 817cba3f933a29fea91e92b5cfc2ff1c/info: closing flushed file at 1731556715690Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d97075d: reopening flushed file at 1731556716176 (+486 ms)Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=0 B/0 for 817cba3f933a29fea91e92b5cfc2ff1c in 544ms, sequenceid=107, compaction requested=false at 1731556716222 (+46 ms)Writing region close event to WAL at 1731556716346 (+124 ms)Running coprocessor post-close hooks at 1731556716379 (+33 ms)Closed at 1731556716379 2024-11-14T03:58:36,401 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:36,409 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=817cba3f933a29fea91e92b5cfc2ff1c, regionState=CLOSED 2024-11-14T03:58:36,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 because future has completed 2024-11-14T03:58:36,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-14T03:58:36,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 817cba3f933a29fea91e92b5cfc2ff1c, server=6a989be77d39,40421,1731556688247 in 902 msec 2024-11-14T03:58:36,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-14T03:58:36,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=817cba3f933a29fea91e92b5cfc2ff1c, UNASSIGN in 914 msec 2024-11-14T03:58:36,476 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:36,483 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=817cba3f933a29fea91e92b5cfc2ff1c, threads=2 2024-11-14T03:58:36,488 DEBUG [StoreFileSplitter-pool-0 {}] 
assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/73cf57353b9e4064a5f5a88acddddad3 for region: 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:36,489 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e1b1590619a74d7d98a33c05e94a467a for region: 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:36,514 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e1b1590619a74d7d98a33c05e94a467a, top=true 2024-11-14T03:58:36,536 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/TestLogRolling-testLogRolling=817cba3f933a29fea91e92b5cfc2ff1c-e1b1590619a74d7d98a33c05e94a467a for child: a528a9ff41f6621f78f30d0307ffb8c3, parent: 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:36,536 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/e1b1590619a74d7d98a33c05e94a467a for region: 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:36,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741845_1021 (size=27) 2024-11-14T03:58:36,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741845_1021 (size=27) 2024-11-14T03:58:36,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741846_1022 (size=27) 2024-11-14T03:58:36,568 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/73cf57353b9e4064a5f5a88acddddad3 for region: 817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:58:36,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741846_1022 (size=27) 2024-11-14T03:58:36,570 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 817cba3f933a29fea91e92b5cfc2ff1c Daughter A: [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c] storefiles, Daughter B: 
[hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/TestLogRolling-testLogRolling=817cba3f933a29fea91e92b5cfc2ff1c-e1b1590619a74d7d98a33c05e94a467a] storefiles. 2024-11-14T03:58:36,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741847_1023 (size=71) 2024-11-14T03:58:36,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741847_1023 (size=71) 2024-11-14T03:58:36,635 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:36,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741848_1024 (size=71) 2024-11-14T03:58:36,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741848_1024 (size=71) 2024-11-14T03:58:36,684 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:36,703 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/recovered.edits/110.seqid, newMaxSeqId=110, maxSeqId=-1 2024-11-14T03:58:36,709 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/recovered.edits/110.seqid, newMaxSeqId=110, maxSeqId=-1 2024-11-14T03:58:36,713 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731556716712"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731556716712"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731556716712"}]},"ts":"1731556716712"} 2024-11-14T03:58:36,713 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731556716712"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731556716712"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731556716712"}]},"ts":"1731556716712"} 2024-11-14T03:58:36,715 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731556716712"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731556716712"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731556716712"}]},"ts":"1731556716712"} 2024-11-14T03:58:36,759 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=504095cd6401df6643acc3f612334809, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a528a9ff41f6621f78f30d0307ffb8c3, ASSIGN}] 2024-11-14T03:58:36,764 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=504095cd6401df6643acc3f612334809, ASSIGN 2024-11-14T03:58:36,764 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a528a9ff41f6621f78f30d0307ffb8c3, ASSIGN 2024-11-14T03:58:36,765 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=504095cd6401df6643acc3f612334809, ASSIGN; state=SPLITTING_NEW, location=6a989be77d39,40421,1731556688247; forceNewPlan=false, retain=false 2024-11-14T03:58:36,766 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a528a9ff41f6621f78f30d0307ffb8c3, ASSIGN; state=SPLITTING_NEW, location=6a989be77d39,40421,1731556688247; forceNewPlan=false, retain=false 2024-11-14T03:58:36,916 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=a528a9ff41f6621f78f30d0307ffb8c3, regionState=OPENING, regionLocation=6a989be77d39,40421,1731556688247 2024-11-14T03:58:36,917 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=504095cd6401df6643acc3f612334809, regionState=OPENING, regionLocation=6a989be77d39,40421,1731556688247 2024-11-14T03:58:36,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a528a9ff41f6621f78f30d0307ffb8c3, ASSIGN because future has completed 2024-11-14T03:58:36,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247}] 2024-11-14T03:58:36,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=504095cd6401df6643acc3f612334809, ASSIGN because future has completed 2024-11-14T03:58:36,925 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 504095cd6401df6643acc3f612334809, server=6a989be77d39,40421,1731556688247}] 2024-11-14T03:58:37,080 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:58:37,080 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => a528a9ff41f6621f78f30d0307ffb8c3, NAME => 'TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-14T03:58:37,081 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,081 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:58:37,081 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,081 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,083 INFO [StoreOpener-a528a9ff41f6621f78f30d0307ffb8c3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,085 INFO [StoreOpener-a528a9ff41f6621f78f30d0307ffb8c3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a528a9ff41f6621f78f30d0307ffb8c3 columnFamilyName info 2024-11-14T03:58:37,085 DEBUG [StoreOpener-a528a9ff41f6621f78f30d0307ffb8c3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:37,137 DEBUG [StoreOpener-a528a9ff41f6621f78f30d0307ffb8c3-1 {}] 
regionserver.StoreEngine(278): loaded hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c->hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/73cf57353b9e4064a5f5a88acddddad3-top 2024-11-14T03:58:37,167 DEBUG [StoreOpener-a528a9ff41f6621f78f30d0307ffb8c3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/TestLogRolling-testLogRolling=817cba3f933a29fea91e92b5cfc2ff1c-e1b1590619a74d7d98a33c05e94a467a 2024-11-14T03:58:37,167 INFO [StoreOpener-a528a9ff41f6621f78f30d0307ffb8c3-1 {}] regionserver.HStore(327): Store=a528a9ff41f6621f78f30d0307ffb8c3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:58:37,167 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,174 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,185 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,186 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,186 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,189 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,191 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened a528a9ff41f6621f78f30d0307ffb8c3; next sequenceid=111; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698071, jitterRate=-0.11235730350017548}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T03:58:37,191 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:58:37,192 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for a528a9ff41f6621f78f30d0307ffb8c3: Running coprocessor pre-open hook at 1731556717081Writing region info on filesystem at 
1731556717081Initializing all the Stores at 1731556717083 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556717083Cleaning up temporary data from old regions at 1731556717186 (+103 ms)Running coprocessor post-open hooks at 1731556717191 (+5 ms)Region opened successfully at 1731556717191 2024-11-14T03:58:37,194 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., pid=12, masterSystemTime=1731556717073 2024-11-14T03:58:37,194 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store a528a9ff41f6621f78f30d0307ffb8c3:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:58:37,194 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-14T03:58:37,194 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:58:37,196 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:58:37,196 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1541): a528a9ff41f6621f78f30d0307ffb8c3/info is initiating minor compaction (all files) 2024-11-14T03:58:37,196 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a528a9ff41f6621f78f30d0307ffb8c3/info in TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 
2024-11-14T03:58:37,197 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c->hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/73cf57353b9e4064a5f5a88acddddad3-top, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/TestLogRolling-testLogRolling=817cba3f933a29fea91e92b5cfc2ff1c-e1b1590619a74d7d98a33c05e94a467a] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp, totalSize=100.6 K 2024-11-14T03:58:37,197 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731556700828 2024-11-14T03:58:37,198 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=817cba3f933a29fea91e92b5cfc2ff1c-e1b1590619a74d7d98a33c05e94a467a, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=107, earliestPutTs=1731556715240 2024-11-14T03:58:37,201 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:58:37,201 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:58:37,202 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 
2024-11-14T03:58:37,202 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 504095cd6401df6643acc3f612334809, NAME => 'TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-14T03:58:37,202 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,202 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:58:37,202 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,202 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,203 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=a528a9ff41f6621f78f30d0307ffb8c3, regionState=OPEN, openSeqNum=111, regionLocation=6a989be77d39,40421,1731556688247 2024-11-14T03:58:37,206 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-14T03:58:37,206 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
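The "Since none of the CFs were above the size, flushing all." line above reflects the per-column-family flush decision: flush only the families whose memstores exceed a lower bound, and fall back to flushing every family (the "4/4 column families" flush that follows) when none of them qualifies. Below is a small sketch of that decision rule with hypothetical names; it is not the FlushAllLargeStoresPolicy code itself, just the shape of the selection.

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    // Hypothetical sketch of a "flush the large stores, otherwise flush everything" decision.
    // memstoreBytesByFamily maps column family name -> current memstore size in bytes.
    public class FlushPolicySketch {

        /** Families to flush: the ones at or above the bound, or all of them if none qualifies. */
        static List<String> selectFamiliesToFlush(Map<String, Long> memstoreBytesByFamily,
                                                  long flushSizeLowerBound) {
            List<String> large = memstoreBytesByFamily.entrySet().stream()
                    .filter(e -> e.getValue() >= flushSizeLowerBound)
                    .map(Map.Entry::getKey)
                    .collect(Collectors.toList());
            // "Since none of the CFs were above the size, flushing all."
            return large.isEmpty() ? List.copyOf(memstoreBytesByFamily.keySet()) : large;
        }

        public static void main(String[] args) {
            Map<String, Long> families =
                    Map.of("info", 5_000L, "ns", 74L, "table", 122L, "rep_barrier", 0L);
            System.out.println(selectFamiliesToFlush(families, 16 * 1024 * 1024L));
        }
    }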
2024-11-14T03:58:37,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-14T03:58:37,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247 because future has completed 2024-11-14T03:58:37,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-14T03:58:37,226 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247 in 299 msec 2024-11-14T03:58:37,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a528a9ff41f6621f78f30d0307ffb8c3, ASSIGN in 464 msec 2024-11-14T03:58:37,236 INFO [StoreOpener-504095cd6401df6643acc3f612334809-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,237 INFO [StoreOpener-504095cd6401df6643acc3f612334809-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 504095cd6401df6643acc3f612334809 columnFamilyName info 2024-11-14T03:58:37,237 DEBUG [StoreOpener-504095cd6401df6643acc3f612334809-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:58:37,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:37,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/info/c1008ce6d5954be4990b3f6a54211133 is 193, key is TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3./info:regioninfo/1731556717202/Put/seqid=0 2024-11-14T03:58:37,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:37,276 DEBUG [StoreOpener-504095cd6401df6643acc3f612334809-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c->hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/73cf57353b9e4064a5f5a88acddddad3-bottom 2024-11-14T03:58:37,277 INFO [StoreOpener-504095cd6401df6643acc3f612334809-1 {}] regionserver.HStore(327): Store=504095cd6401df6643acc3f612334809/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:58:37,277 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,279 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,280 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,280 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,280 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,284 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,285 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 504095cd6401df6643acc3f612334809; next sequenceid=111; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694262, jitterRate=-0.11720132827758789}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T03:58:37,286 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 504095cd6401df6643acc3f612334809 2024-11-14T03:58:37,286 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 504095cd6401df6643acc3f612334809: Running coprocessor pre-open hook at 1731556717202Writing region info on filesystem at 1731556717202Initializing all the Stores at 1731556717209 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556717209Cleaning up temporary data from old regions at 1731556717280 (+71 ms)Running coprocessor post-open hooks at 1731556717286 (+6 ms)Region opened successfully at 1731556717286 2024-11-14T03:58:37,287 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809., pid=13, masterSystemTime=1731556717073 2024-11-14T03:58:37,287 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 504095cd6401df6643acc3f612334809:info, priority=-2147483648, current under compaction store size is 2 2024-11-14T03:58:37,287 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-14T03:58:37,287 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-14T03:58:37,289 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 2024-11-14T03:58:37,289 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HStore(1541): 504095cd6401df6643acc3f612334809/info is initiating minor compaction (all files) 2024-11-14T03:58:37,289 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 504095cd6401df6643acc3f612334809/info in TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 
2024-11-14T03:58:37,289 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c->hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/73cf57353b9e4064a5f5a88acddddad3-bottom] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/.tmp, totalSize=73.6 K 2024-11-14T03:58:37,290 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a528a9ff41f6621f78f30d0307ffb8c3#info#compaction#63 average throughput is 3.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:58:37,291 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/fb88787f434f44c8a5643202c424aa37 is 1080, key is row0062/info:/1731556713227/Put/seqid=0 2024-11-14T03:58:37,292 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=504095cd6401df6643acc3f612334809, regionState=OPEN, openSeqNum=111, regionLocation=6a989be77d39,40421,1731556688247 2024-11-14T03:58:37,294 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] compactions.Compactor(225): Compacting 73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731556700828 2024-11-14T03:58:37,294 DEBUG [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 2024-11-14T03:58:37,294 INFO [RS_OPEN_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 
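The throttle.PressureAwareThroughputController entries above and below report how fast each compaction wrote and how long it slept to stay under the configured limit (50.00 MB/second here). The underlying idea is plain rate limiting: track bytes written against elapsed time and sleep whenever the observed rate would exceed the limit. The following self-contained sketch illustrates that idea; it is not the HBase controller, and the class and field names are made up for the example.

    // Hypothetical sketch of throughput-limited writing: after each chunk, sleep just long
    // enough that (bytesWritten / elapsedSeconds) stays at or below the configured limit.
    public class ThroughputLimiterSketch {
        private final double limitBytesPerSec;
        private final long startNanos = System.nanoTime();
        private long bytesWritten = 0;
        private long totalSleptMs = 0;

        ThroughputLimiterSketch(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        /** Record a written chunk and sleep if we are ahead of the allowed rate. */
        void onWrite(long chunkBytes) throws InterruptedException {
            bytesWritten += chunkBytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double minSecondsForBytes = bytesWritten / limitBytesPerSec;
            long sleepMs = (long) ((minSecondsForBytes - elapsedSec) * 1000);
            if (sleepMs > 0) {
                totalSleptMs += sleepMs;
                Thread.sleep(sleepMs);
            }
        }

        double averageThroughputMBps() {
            double elapsedSec = Math.max((System.nanoTime() - startNanos) / 1e9, 1e-9);
            return bytesWritten / elapsedSec / (1024 * 1024);
        }

        public static void main(String[] args) throws InterruptedException {
            ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50.0 * 1024 * 1024);
            for (int i = 0; i < 100; i++) {
                limiter.onWrite(1024 * 1024);   // pretend we wrote a 1 MB chunk
            }
            System.out.printf("average throughput is %.2f MB/second, total slept time is %d ms%n",
                    limiter.averageThroughputMBps(), limiter.totalSleptMs);
        }
    }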
2024-11-14T03:58:37,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 504095cd6401df6643acc3f612334809, server=6a989be77d39,40421,1731556688247 because future has completed 2024-11-14T03:58:37,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-14T03:58:37,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 504095cd6401df6643acc3f612334809, server=6a989be77d39,40421,1731556688247 in 381 msec 2024-11-14T03:58:37,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-14T03:58:37,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=504095cd6401df6643acc3f612334809, ASSIGN in 550 msec 2024-11-14T03:58:37,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=817cba3f933a29fea91e92b5cfc2ff1c, daughterA=504095cd6401df6643acc3f612334809, daughterB=a528a9ff41f6621f78f30d0307ffb8c3 in 1.8270 sec 2024-11-14T03:58:37,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741850_1026 (size=32081) 2024-11-14T03:58:37,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741850_1026 (size=32081) 2024-11-14T03:58:37,333 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/fb88787f434f44c8a5643202c424aa37 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fb88787f434f44c8a5643202c424aa37 2024-11-14T03:58:37,347 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 504095cd6401df6643acc3f612334809#info#compaction#64 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:58:37,348 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/.tmp/info/c5a3bf825b3846a8b32802d231a50ccc is 1080, key is row0001/info:/1731556700828/Put/seqid=0 2024-11-14T03:58:37,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741849_1025 (size=9882) 2024-11-14T03:58:37,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741849_1025 (size=9882) 2024-11-14T03:58:37,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/info/c1008ce6d5954be4990b3f6a54211133 2024-11-14T03:58:37,374 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in a528a9ff41f6621f78f30d0307ffb8c3/info of a528a9ff41f6621f78f30d0307ffb8c3 into fb88787f434f44c8a5643202c424aa37(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T03:58:37,374 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:58:37,374 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., storeName=a528a9ff41f6621f78f30d0307ffb8c3/info, priority=14, startTime=1731556717194; duration=0sec 2024-11-14T03:58:37,374 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:58:37,374 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a528a9ff41f6621f78f30d0307ffb8c3:info 2024-11-14T03:58:37,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741851_1027 (size=70862) 2024-11-14T03:58:37,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741851_1027 (size=70862) 2024-11-14T03:58:37,406 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/.tmp/info/c5a3bf825b3846a8b32802d231a50ccc as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/info/c5a3bf825b3846a8b32802d231a50ccc 2024-11-14T03:58:37,414 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 504095cd6401df6643acc3f612334809/info of 504095cd6401df6643acc3f612334809 into 
c5a3bf825b3846a8b32802d231a50ccc(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T03:58:37,414 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 504095cd6401df6643acc3f612334809: 2024-11-14T03:58:37,414 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809., storeName=504095cd6401df6643acc3f612334809/info, priority=15, startTime=1731556717287; duration=0sec 2024-11-14T03:58:37,415 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:58:37,415 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 504095cd6401df6643acc3f612334809:info 2024-11-14T03:58:37,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/ns/59aeaaf5a5324e2386d622f3e78eed69 is 43, key is default/ns:d/1731556690630/Put/seqid=0 2024-11-14T03:58:37,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741852_1028 (size=5153) 2024-11-14T03:58:37,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741852_1028 (size=5153) 2024-11-14T03:58:37,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/ns/59aeaaf5a5324e2386d622f3e78eed69 2024-11-14T03:58:37,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/table/ecff348d4b2e4d1b87a61b3616601cb6 is 65, key is TestLogRolling-testLogRolling/table:state/1731556691368/Put/seqid=0 2024-11-14T03:58:37,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741853_1029 (size=5340) 2024-11-14T03:58:37,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741853_1029 (size=5340) 2024-11-14T03:58:37,837 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
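The flush and compaction output in the entries above and below is first written under the region's .tmp directory (for example .../1588230740/.tmp/info/...) and only afterwards moved into the live store directory by the "Committing ... as ..." step, so readers never observe a partially written HFile. Here is a minimal sketch of that write-to-temp-then-rename pattern using plain java.nio on a local directory; it is an analogy for the commit step the log describes, not the HRegionFileSystem code, and the file name in the example is invented.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    // Hypothetical sketch of the "write into .tmp, then move into place" pattern that the
    // 'Committing .../.tmp/... as .../info/...' log lines describe.
    public class TmpThenCommitSketch {

        static Path writeStoreFile(Path storeDir, String fileName, byte[] contents) throws IOException {
            Path tmpDir = storeDir.resolve(".tmp");
            Path liveDir = storeDir.resolve("info");
            Files.createDirectories(tmpDir);
            Files.createDirectories(liveDir);

            // 1. Write the whole file under .tmp; readers never look there.
            Path tmpFile = tmpDir.resolve(fileName);
            Files.write(tmpFile, contents);

            // 2. Commit: move the finished file into the live store directory in one step.
            Path committed = liveDir.resolve(fileName);
            return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("store-sketch");
            Path committed = writeStoreFile(storeDir, "example-hfile", "hfile bytes".getBytes());
            System.out.println("Committed " + committed);
        }
    }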
2024-11-14T03:58:37,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/table/ecff348d4b2e4d1b87a61b3616601cb6 2024-11-14T03:58:37,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/info/c1008ce6d5954be4990b3f6a54211133 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/info/c1008ce6d5954be4990b3f6a54211133 2024-11-14T03:58:37,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/info/c1008ce6d5954be4990b3f6a54211133, entries=30, sequenceid=17, filesize=9.7 K 2024-11-14T03:58:37,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/ns/59aeaaf5a5324e2386d622f3e78eed69 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/ns/59aeaaf5a5324e2386d622f3e78eed69 2024-11-14T03:58:37,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/ns/59aeaaf5a5324e2386d622f3e78eed69, entries=2, sequenceid=17, filesize=5.0 K 2024-11-14T03:58:37,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/table/ecff348d4b2e4d1b87a61b3616601cb6 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/table/ecff348d4b2e4d1b87a61b3616601cb6 2024-11-14T03:58:38,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/table/ecff348d4b2e4d1b87a61b3616601cb6, entries=2, sequenceid=17, filesize=5.2 K 2024-11-14T03:58:38,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 800ms, sequenceid=17, compaction requested=false 2024-11-14T03:58:38,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T03:58:38,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:38,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:39,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:39,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:40,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:40,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:41,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:41,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:41,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,963 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:58:41,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:41,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,019 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:58:42,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:42,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:43,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:43,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:44,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:44,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:45,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:45,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:45,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53488 deadline: 1731556735351, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. is not online on 6a989be77d39,40421,1731556688247 2024-11-14T03:58:45,352 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. 
is not online on 6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T03:58:45,352 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c. is not online on 6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T03:58:45,352 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731556690760.817cba3f933a29fea91e92b5cfc2ff1c., hostname=6a989be77d39,40421,1731556688247, seqNum=2 from cache 2024-11-14T03:58:46,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:46,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:47,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:47,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:48,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:48,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:49,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:49,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:50,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:50,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:51,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:58:51,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:51,333 INFO [master/6a989be77d39:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T03:58:51,333 INFO [master/6a989be77d39:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T03:58:52,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:52,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:53,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:53,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:54,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:54,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:55,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:55,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:55,491 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-14T03:58:56,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:56,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:57,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:57,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:58,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:58,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:59,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:58:59,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:00,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:00,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:01,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:01,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:02,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:02,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:03,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:03,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:04,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:04,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:05,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:05,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:05,508 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0087', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111] 2024-11-14T03:59:05,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:05,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:59:05,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/35ca032015af4f2a98bcf3b9f34ab235 is 1080, key is row0087/info:/1731556745511/Put/seqid=0 2024-11-14T03:59:05,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741854_1030 (size=12509) 2024-11-14T03:59:05,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741854_1030 (size=12509) 2024-11-14T03:59:05,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/35ca032015af4f2a98bcf3b9f34ab235 2024-11-14T03:59:05,593 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/35ca032015af4f2a98bcf3b9f34ab235 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35ca032015af4f2a98bcf3b9f34ab235 2024-11-14T03:59:05,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35ca032015af4f2a98bcf3b9f34ab235, entries=7, sequenceid=121, filesize=12.2 K 2024-11-14T03:59:05,603 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=3.15 KB/3228 for a528a9ff41f6621f78f30d0307ffb8c3 in 76ms, sequenceid=121, compaction requested=false 2024-11-14T03:59:05,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:06,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:59:06,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:07,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:07,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:59:07,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:07,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:59:07,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/35fa5cf7fa4745d4ac300e1dde9c2d47 is 1080, key is row0094/info:/1731556745530/Put/seqid=0 2024-11-14T03:59:07,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741855_1031 (size=12512) 2024-11-14T03:59:07,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741855_1031 (size=12512) 2024-11-14T03:59:07,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/35fa5cf7fa4745d4ac300e1dde9c2d47 2024-11-14T03:59:07,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/35fa5cf7fa4745d4ac300e1dde9c2d47 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35fa5cf7fa4745d4ac300e1dde9c2d47 2024-11-14T03:59:07,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35fa5cf7fa4745d4ac300e1dde9c2d47, entries=7, sequenceid=131, filesize=12.2 K 2024-11-14T03:59:07,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for a528a9ff41f6621f78f30d0307ffb8c3 in 44ms, sequenceid=131, compaction requested=true 2024-11-14T03:59:07,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:07,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a528a9ff41f6621f78f30d0307ffb8c3:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:59:07,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:07,594 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:59:07,595 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 57102 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-14T03:59:07,596 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1541): a528a9ff41f6621f78f30d0307ffb8c3/info is initiating minor compaction (all files) 2024-11-14T03:59:07,596 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a528a9ff41f6621f78f30d0307ffb8c3/info in TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:59:07,596 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fb88787f434f44c8a5643202c424aa37, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35ca032015af4f2a98bcf3b9f34ab235, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35fa5cf7fa4745d4ac300e1dde9c2d47] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp, totalSize=55.8 K 2024-11-14T03:59:07,596 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb88787f434f44c8a5643202c424aa37, keycount=25, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=107, earliestPutTs=1731556713227 2024-11-14T03:59:07,597 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 35ca032015af4f2a98bcf3b9f34ab235, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1731556745511 2024-11-14T03:59:07,597 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 35fa5cf7fa4745d4ac300e1dde9c2d47, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1731556745530 2024-11-14T03:59:07,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-14T03:59:07,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:07,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/0babca58f1c14d439931df12b75c9b82 is 1080, key is row0101/info:/1731556747552/Put/seqid=0 2024-11-14T03:59:07,630 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a528a9ff41f6621f78f30d0307ffb8c3#info#compaction#70 average throughput is 10.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:59:07,630 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/39366ea3a76c443e85b4e972710e07ab is 1080, key is row0062/info:/1731556713227/Put/seqid=0 2024-11-14T03:59:07,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741856_1032 (size=21156) 2024-11-14T03:59:07,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741856_1032 (size=21156) 2024-11-14T03:59:07,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=149 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/0babca58f1c14d439931df12b75c9b82 2024-11-14T03:59:07,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/0babca58f1c14d439931df12b75c9b82 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/0babca58f1c14d439931df12b75c9b82 2024-11-14T03:59:07,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/0babca58f1c14d439931df12b75c9b82, entries=15, sequenceid=149, filesize=20.7 K 2024-11-14T03:59:07,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for a528a9ff41f6621f78f30d0307ffb8c3 in 131ms, sequenceid=149, compaction requested=false 2024-11-14T03:59:07,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:07,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741857_1033 (size=47300) 2024-11-14T03:59:07,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741857_1033 (size=47300) 2024-11-14T03:59:07,837 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-14T03:59:08,143 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/39366ea3a76c443e85b4e972710e07ab as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/39366ea3a76c443e85b4e972710e07ab 2024-11-14T03:59:08,153 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a528a9ff41f6621f78f30d0307ffb8c3/info of a528a9ff41f6621f78f30d0307ffb8c3 into 39366ea3a76c443e85b4e972710e07ab(size=46.2 K), total size for store is 66.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T03:59:08,153 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:08,153 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., storeName=a528a9ff41f6621f78f30d0307ffb8c3/info, priority=13, startTime=1731556747594; duration=0sec 2024-11-14T03:59:08,153 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:08,154 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a528a9ff41f6621f78f30d0307ffb8c3:info 2024-11-14T03:59:08,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:08,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:09,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:09,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:09,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T03:59:09,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/d57ef774bb3842ce9a2f0cb795dea69c is 1080, key is row0116/info:/1731556747599/Put/seqid=0 2024-11-14T03:59:09,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741858_1034 (size=20078) 2024-11-14T03:59:09,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741858_1034 (size=20078) 2024-11-14T03:59:09,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/d57ef774bb3842ce9a2f0cb795dea69c 2024-11-14T03:59:09,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-14T03:59:09,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53488 deadline: 1731556759693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247 2024-11-14T03:59:09,694 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111 , the old value is region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T03:59:09,694 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T03:59:09,694 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111 because the exception is null or not the one we care about 2024-11-14T03:59:09,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/d57ef774bb3842ce9a2f0cb795dea69c as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/d57ef774bb3842ce9a2f0cb795dea69c 2024-11-14T03:59:09,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/d57ef774bb3842ce9a2f0cb795dea69c, entries=14, sequenceid=167, filesize=19.6 K 2024-11-14T03:59:09,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for a528a9ff41f6621f78f30d0307ffb8c3 in 77ms, sequenceid=167, compaction requested=true 2024-11-14T03:59:09,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:09,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a528a9ff41f6621f78f30d0307ffb8c3:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:59:09,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:09,710 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:59:09,712 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 88534 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T03:59:09,712 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1541): a528a9ff41f6621f78f30d0307ffb8c3/info is initiating minor compaction (all files) 2024-11-14T03:59:09,712 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a528a9ff41f6621f78f30d0307ffb8c3/info in TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 
2024-11-14T03:59:09,712 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/39366ea3a76c443e85b4e972710e07ab, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/0babca58f1c14d439931df12b75c9b82, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/d57ef774bb3842ce9a2f0cb795dea69c] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp, totalSize=86.5 K 2024-11-14T03:59:09,713 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 39366ea3a76c443e85b4e972710e07ab, keycount=39, bloomtype=ROW, size=46.2 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1731556713227 2024-11-14T03:59:09,717 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0babca58f1c14d439931df12b75c9b82, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1731556747552 2024-11-14T03:59:09,718 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting d57ef774bb3842ce9a2f0cb795dea69c, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731556747599 2024-11-14T03:59:09,751 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a528a9ff41f6621f78f30d0307ffb8c3#info#compaction#72 average throughput is 23.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:59:09,753 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/fff0d3c999c049b0b6d15fab532b2a46 is 1080, key is row0062/info:/1731556713227/Put/seqid=0 2024-11-14T03:59:09,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741859_1035 (size=78805) 2024-11-14T03:59:09,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741859_1035 (size=78805) 2024-11-14T03:59:09,816 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/fff0d3c999c049b0b6d15fab532b2a46 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fff0d3c999c049b0b6d15fab532b2a46 2024-11-14T03:59:09,826 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a528a9ff41f6621f78f30d0307ffb8c3/info of a528a9ff41f6621f78f30d0307ffb8c3 into fff0d3c999c049b0b6d15fab532b2a46(size=77.0 K), total size for store is 77.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T03:59:09,827 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:09,827 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., storeName=a528a9ff41f6621f78f30d0307ffb8c3/info, priority=13, startTime=1731556749710; duration=0sec 2024-11-14T03:59:09,827 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:09,827 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a528a9ff41f6621f78f30d0307ffb8c3:info 2024-11-14T03:59:10,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:10,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:11,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:11,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:11,868 DEBUG [master/6a989be77d39:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=60, reuseRatio=86.96% 2024-11-14T03:59:11,869 DEBUG [master/6a989be77d39:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-14T03:59:12,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-14T03:59:12,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-14T03:59:13,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:13,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:14,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:14,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:15,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:15,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:16,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:16,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:17,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:17,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:18,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:18,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:19,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:19,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:19,599 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-14T03:59:19,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-14T03:59:19,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3
2024-11-14T03:59:19,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-14T03:59:19,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/c9be0423f87a438191515055ea68d588 is 1080, key is row0130/info:/1731556749635/Put/seqid=0
2024-11-14T03:59:19,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741860_1036 (size=22238)
2024-11-14T03:59:19,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741860_1036 (size=22238)
2024-11-14T03:59:19,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/c9be0423f87a438191515055ea68d588
2024-11-14T03:59:19,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/c9be0423f87a438191515055ea68d588 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/c9be0423f87a438191515055ea68d588
2024-11-14T03:59:19,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-14T03:59:19,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/c9be0423f87a438191515055ea68d588, entries=16, sequenceid=187, filesize=21.7 K
2024-11-14T03:59:19,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53488 deadline: 1731556769797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247
2024-11-14T03:59:19,798 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111 , the old value is region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-14T03:59:19,799 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-14T03:59:19,799 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111 because the exception is null or not the one we care about
2024-11-14T03:59:19,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for a528a9ff41f6621f78f30d0307ffb8c3 in 26ms, sequenceid=187, compaction requested=false
2024-11-14T03:59:19,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3:
2024-11-14T03:59:20,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:20,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:21,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:21,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:22,081 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a528a9ff41f6621f78f30d0307ffb8c3, had cached 0 bytes from a total of 101043 2024-11-14T03:59:22,202 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 504095cd6401df6643acc3f612334809, had cached 0 bytes from a total of 70862 2024-11-14T03:59:22,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-14T03:59:22,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-14T03:59:23,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:23,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:24,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:24,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:25,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:25,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:26,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:26,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:27,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:27,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:28,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:28,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:29,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:29,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:29,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3
2024-11-14T03:59:29,844 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-14T03:59:29,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/1cdd751f68324603953a00b27645f65d is 1080, key is row0146/info:/1731556759774/Put/seqid=0
2024-11-14T03:59:29,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741861_1037 (size=20078)
2024-11-14T03:59:29,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741861_1037 (size=20078)
2024-11-14T03:59:29,940 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/1cdd751f68324603953a00b27645f65d
2024-11-14T03:59:29,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/1cdd751f68324603953a00b27645f65d as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/1cdd751f68324603953a00b27645f65d
2024-11-14T03:59:30,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/1cdd751f68324603953a00b27645f65d, entries=14, sequenceid=204, filesize=19.6 K
2024-11-14T03:59:30,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for a528a9ff41f6621f78f30d0307ffb8c3 in 159ms, sequenceid=204, compaction requested=true
2024-11-14T03:59:30,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3:
2024-11-14T03:59:30,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a528a9ff41f6621f78f30d0307ffb8c3:info, priority=-2147483648, current under compaction store size is 1
2024-11-14T03:59:30,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-14T03:59:30,004 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-14T03:59:30,005 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 121121 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-14T03:59:30,005 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1541): a528a9ff41f6621f78f30d0307ffb8c3/info is initiating minor compaction (all files)
2024-11-14T03:59:30,005 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a528a9ff41f6621f78f30d0307ffb8c3/info in TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.
2024-11-14T03:59:30,005 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fff0d3c999c049b0b6d15fab532b2a46, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/c9be0423f87a438191515055ea68d588, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/1cdd751f68324603953a00b27645f65d] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp, totalSize=118.3 K
2024-11-14T03:59:30,006 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting fff0d3c999c049b0b6d15fab532b2a46, keycount=68, bloomtype=ROW, size=77.0 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731556713227
2024-11-14T03:59:30,009 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting c9be0423f87a438191515055ea68d588, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1731556749635
2024-11-14T03:59:30,013 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1cdd751f68324603953a00b27645f65d, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731556759774
2024-11-14T03:59:30,088 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a528a9ff41f6621f78f30d0307ffb8c3#info#compaction#75 average throughput is 16.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-14T03:59:30,089 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/962f1bfe65b54b779e8135b44948c5fa is 1080, key is row0062/info:/1731556713227/Put/seqid=0
2024-11-14T03:59:30,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741862_1038 (size=111255)
2024-11-14T03:59:30,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741862_1038 (size=111255)
2024-11-14T03:59:30,177 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/962f1bfe65b54b779e8135b44948c5fa as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/962f1bfe65b54b779e8135b44948c5fa
2024-11-14T03:59:30,198 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a528a9ff41f6621f78f30d0307ffb8c3/info of a528a9ff41f6621f78f30d0307ffb8c3 into 962f1bfe65b54b779e8135b44948c5fa(size=108.6 K), total size for store is 108.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-14T03:59:30,198 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a528a9ff41f6621f78f30d0307ffb8c3:
2024-11-14T03:59:30,198 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., storeName=a528a9ff41f6621f78f30d0307ffb8c3/info, priority=13, startTime=1731556770004; duration=0sec
2024-11-14T03:59:30,198 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-14T03:59:30,198 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a528a9ff41f6621f78f30d0307ffb8c3:info
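The entries above show an automatic memstore flush followed by a minor compaction of three store files picked by ExploringCompactionPolicy. As a hedged illustration only, the same two operations can be requested by hand through the public Admin API; the cluster configuration is assumed, the class name is invented, and only the table name is taken from the log.

    // Illustrative sketch only: manually triggering what the region server did on its own above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompact {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(table);   // persist memstore contents to a new HFile, as MemStoreFlusher.0 did
          admin.compact(table); // ask the region server to select and run a compaction
                                // (here it chose a minor, all-file compaction of 3 HFiles)
        }
      }
    }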
2024-11-14T03:59:30,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:30,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:31,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
2024-11-14T03:59:31,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
2024-11-14T03:59:31,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3
2024-11-14T03:59:31,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-14T03:59:31,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/cdc2a90cc6f64eb2bc357f1f77ba34e9 is 1080, key is row0160/info:/1731556769849/Put/seqid=0
2024-11-14T03:59:31,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-14T03:59:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53488 deadline: 1731556781941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247
2024-11-14T03:59:31,942 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111 , the old value is region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-14T03:59:31,943 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a528a9ff41f6621f78f30d0307ffb8c3, server=6a989be77d39,40421,1731556688247
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-14T03:59:31,943 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., hostname=6a989be77d39,40421,1731556688247, seqNum=111 because the exception is null or not the one we care about
2024-11-14T03:59:31,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741863_1039 (size=12516)
2024-11-14T03:59:31,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741863_1039 (size=12516)
2024-11-14T03:59:31,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/cdc2a90cc6f64eb2bc357f1f77ba34e9
2024-11-14T03:59:31,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/cdc2a90cc6f64eb2bc357f1f77ba34e9 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/cdc2a90cc6f64eb2bc357f1f77ba34e9
2024-11-14T03:59:31,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/cdc2a90cc6f64eb2bc357f1f77ba34e9, entries=7, sequenceid=215, filesize=12.2 K
2024-11-14T03:59:31,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for a528a9ff41f6621f78f30d0307ffb8c3 in 112ms, sequenceid=215, compaction requested=false
2024-11-14T03:59:31,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3:
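The RegionTooBusyException above is the region server pushing back because the region's memstore is over its blocking limit while the flush is still in flight; the AsyncRegionLocatorHelper lines show the client treating it as retryable rather than as a stale-location error. Below is a rough, assumption-laden sketch of how a caller might back off on this exception; the row, qualifier, value, and retry policy are invented for illustration, and the stock HBase client already retries this exception on its own.

    // Illustrative sketch only: explicit backoff on RegionTooBusyException ("Over memstore limit").
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          // Placeholder row/qualifier/value, not taken from the test.
          Put put = new Put(Bytes.toBytes("row0160"))
              .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException busy) {
              // Memstore is above its blocking limit; wait for a flush to drain it, then retry.
              if (attempt >= 5) {
                throw busy;
              }
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }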
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:32,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:33,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:33,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:34,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:34,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:35,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:35,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:36,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:36,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:37,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:37,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:37,841 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T03:59:38,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:38,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:39,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:39,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:40,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:40,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:40,491 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-14T03:59:41,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:41,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:42,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:42,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-14T03:59:42,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/7279fe52a3a74fe59e52224d4a8c1368 is 1080, key is row0167/info:/1731556771884/Put/seqid=0 2024-11-14T03:59:42,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741864_1040 (size=29784) 2024-11-14T03:59:42,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741864_1040 (size=29784) 2024-11-14T03:59:42,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/7279fe52a3a74fe59e52224d4a8c1368 2024-11-14T03:59:42,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/7279fe52a3a74fe59e52224d4a8c1368 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/7279fe52a3a74fe59e52224d4a8c1368 2024-11-14T03:59:42,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/7279fe52a3a74fe59e52224d4a8c1368, entries=23, sequenceid=241, filesize=29.1 K 2024-11-14T03:59:42,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for a528a9ff41f6621f78f30d0307ffb8c3 in 117ms, sequenceid=241, compaction requested=true 2024-11-14T03:59:42,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:42,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a528a9ff41f6621f78f30d0307ffb8c3:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:59:42,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:42,162 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:59:42,164 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 153555 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T03:59:42,164 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HStore(1541): a528a9ff41f6621f78f30d0307ffb8c3/info is initiating minor compaction (all files) 2024-11-14T03:59:42,164 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a528a9ff41f6621f78f30d0307ffb8c3/info in TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:59:42,164 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/962f1bfe65b54b779e8135b44948c5fa, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/cdc2a90cc6f64eb2bc357f1f77ba34e9, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/7279fe52a3a74fe59e52224d4a8c1368] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp, totalSize=150.0 K 2024-11-14T03:59:42,165 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] compactions.Compactor(225): Compacting 962f1bfe65b54b779e8135b44948c5fa, keycount=98, bloomtype=ROW, size=108.6 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731556713227 2024-11-14T03:59:42,168 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] compactions.Compactor(225): Compacting cdc2a90cc6f64eb2bc357f1f77ba34e9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1731556769849 2024-11-14T03:59:42,168 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] compactions.Compactor(225): Compacting 7279fe52a3a74fe59e52224d4a8c1368, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1731556771884 2024-11-14T03:59:42,205 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a528a9ff41f6621f78f30d0307ffb8c3#info#compaction#78 average throughput is 43.78 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:59:42,206 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/a1c124ba3e42479e900c7ab229405fcc is 1080, key is row0062/info:/1731556713227/Put/seqid=0 2024-11-14T03:59:42,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741865_1041 (size=143902) 2024-11-14T03:59:42,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741865_1041 (size=143902) 2024-11-14T03:59:42,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:42,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:42,668 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/a1c124ba3e42479e900c7ab229405fcc as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/a1c124ba3e42479e900c7ab229405fcc 2024-11-14T03:59:42,684 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a528a9ff41f6621f78f30d0307ffb8c3/info of a528a9ff41f6621f78f30d0307ffb8c3 into a1c124ba3e42479e900c7ab229405fcc(size=140.5 K), total size for store is 140.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T03:59:42,684 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:42,684 INFO [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., storeName=a528a9ff41f6621f78f30d0307ffb8c3/info, priority=13, startTime=1731556782162; duration=0sec 2024-11-14T03:59:42,684 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:42,684 DEBUG [RS:0;6a989be77d39:40421-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a528a9ff41f6621f78f30d0307ffb8c3:info 2024-11-14T03:59:43,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:59:43,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:59:44,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:44,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T03:59:44,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/73745fb2a38b41d692a31a29eb1f28f7 is 1080, key is row0190/info:/1731556782054/Put/seqid=0 2024-11-14T03:59:44,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741866_1042 (size=12516) 2024-11-14T03:59:44,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741866_1042 (size=12516) 2024-11-14T03:59:44,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/73745fb2a38b41d692a31a29eb1f28f7 2024-11-14T03:59:44,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/73745fb2a38b41d692a31a29eb1f28f7 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73745fb2a38b41d692a31a29eb1f28f7 2024-11-14T03:59:44,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73745fb2a38b41d692a31a29eb1f28f7, entries=7, sequenceid=252, filesize=12.2 K 2024-11-14T03:59:44,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for a528a9ff41f6621f78f30d0307ffb8c3 in 28ms, sequenceid=252, compaction requested=false 2024-11-14T03:59:44,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:44,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-14T03:59:44,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/4204ef1698c64e0db3d8844767c497d1 is 1080, key is row0197/info:/1731556784072/Put/seqid=0 2024-11-14T03:59:44,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to 
blk_1073741867_1043 (size=21169) 2024-11-14T03:59:44,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741867_1043 (size=21169) 2024-11-14T03:59:44,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/4204ef1698c64e0db3d8844767c497d1 2024-11-14T03:59:44,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/4204ef1698c64e0db3d8844767c497d1 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/4204ef1698c64e0db3d8844767c497d1 2024-11-14T03:59:44,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/4204ef1698c64e0db3d8844767c497d1, entries=15, sequenceid=270, filesize=20.7 K 2024-11-14T03:59:44,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for a528a9ff41f6621f78f30d0307ffb8c3 in 26ms, sequenceid=270, compaction requested=true 2024-11-14T03:59:44,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:44,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a528a9ff41f6621f78f30d0307ffb8c3:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:59:44,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:44,127 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:59:44,127 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 177587 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T03:59:44,128 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1541): a528a9ff41f6621f78f30d0307ffb8c3/info is initiating minor compaction (all files) 2024-11-14T03:59:44,128 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a528a9ff41f6621f78f30d0307ffb8c3/info in TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 
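Each flush above follows the same two-step pattern: the new HFile is first written under the region's .tmp/info directory, then "committed" by HRegionFileSystem, i.e. moved into the live info/ store directory so readers never observe a half-written file. Below is a minimal sketch of that write-then-rename idea against a plain Hadoop FileSystem; it is illustrative only (HBase's real commit path adds validation and store-file bookkeeping), and the paths and payload are hypothetical:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical region layout mirroring the log: <region>/.tmp/info -> <region>/info
    Path region = new Path("hdfs://localhost:38151/user/jenkins/test-data/example-region");
    Path tmpFile = new Path(region, ".tmp/info/flush-candidate");
    Path committed = new Path(region, "info/flush-candidate");

    try (FileSystem fs = FileSystem.get(region.toUri(), conf)) {
      // Step 1: write the whole file under .tmp so readers never see partial data.
      try (FSDataOutputStream out = fs.create(tmpFile, true)) {
        out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
      }
      // Step 2: "commit" by renaming into the live store directory (the
      // "Committing .tmp/info/... as .../info/..." step in the log above).
      fs.mkdirs(committed.getParent());
      if (!fs.rename(tmpFile, committed)) {
        throw new java.io.IOException("rename failed for " + tmpFile);
      }
    }
  }
}

Because an HDFS rename is a single namespace operation, the new file becomes visible to scanners all at once, which is what makes the commit step safe.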
2024-11-14T03:59:44,128 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/a1c124ba3e42479e900c7ab229405fcc, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73745fb2a38b41d692a31a29eb1f28f7, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/4204ef1698c64e0db3d8844767c497d1] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp, totalSize=173.4 K 2024-11-14T03:59:44,128 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1c124ba3e42479e900c7ab229405fcc, keycount=128, bloomtype=ROW, size=140.5 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1731556713227 2024-11-14T03:59:44,128 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 73745fb2a38b41d692a31a29eb1f28f7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1731556782054 2024-11-14T03:59:44,129 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4204ef1698c64e0db3d8844767c497d1, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1731556784072 2024-11-14T03:59:44,139 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a528a9ff41f6621f78f30d0307ffb8c3#info#compaction#81 average throughput is 76.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:59:44,140 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/798fcfeb1d6a4ed38c652727a6049eac is 1080, key is row0062/info:/1731556713227/Put/seqid=0 2024-11-14T03:59:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741868_1044 (size=167737) 2024-11-14T03:59:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741868_1044 (size=167737) 2024-11-14T03:59:44,157 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/798fcfeb1d6a4ed38c652727a6049eac as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/798fcfeb1d6a4ed38c652727a6049eac 2024-11-14T03:59:44,164 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a528a9ff41f6621f78f30d0307ffb8c3/info of a528a9ff41f6621f78f30d0307ffb8c3 into 798fcfeb1d6a4ed38c652727a6049eac(size=163.8 K), total size for store is 163.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T03:59:44,164 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:44,164 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., storeName=a528a9ff41f6621f78f30d0307ffb8c3/info, priority=13, startTime=1731556784126; duration=0sec 2024-11-14T03:59:44,165 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:44,165 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a528a9ff41f6621f78f30d0307ffb8c3:info 2024-11-14T03:59:44,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:44,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:45,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:45,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:46,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T03:59:46,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/dd7f69680cc2498e81b891db1ec4ef8b is 1080, key is row0212/info:/1731556784101/Put/seqid=0 2024-11-14T03:59:46,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741869_1045 (size=20092) 2024-11-14T03:59:46,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741869_1045 (size=20092) 2024-11-14T03:59:46,136 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/dd7f69680cc2498e81b891db1ec4ef8b 2024-11-14T03:59:46,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/dd7f69680cc2498e81b891db1ec4ef8b as 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/dd7f69680cc2498e81b891db1ec4ef8b 2024-11-14T03:59:46,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/dd7f69680cc2498e81b891db1ec4ef8b, entries=14, sequenceid=288, filesize=19.6 K 2024-11-14T03:59:46,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for a528a9ff41f6621f78f30d0307ffb8c3 in 21ms, sequenceid=288, compaction requested=false 2024-11-14T03:59:46,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:46,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:46,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-14T03:59:46,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/87aaec3941b04365b8aaf81fcd31a6c3 is 1080, key is row0226/info:/1731556786127/Put/seqid=0 2024-11-14T03:59:46,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741870_1046 (size=21171) 2024-11-14T03:59:46,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741870_1046 (size=21171) 2024-11-14T03:59:46,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/87aaec3941b04365b8aaf81fcd31a6c3 2024-11-14T03:59:46,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/87aaec3941b04365b8aaf81fcd31a6c3 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/87aaec3941b04365b8aaf81fcd31a6c3 2024-11-14T03:59:46,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/87aaec3941b04365b8aaf81fcd31a6c3, entries=15, sequenceid=306, filesize=20.7 K 2024-11-14T03:59:46,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for a528a9ff41f6621f78f30d0307ffb8c3 in 21ms, sequenceid=306, compaction requested=true 2024-11-14T03:59:46,169 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:46,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a528a9ff41f6621f78f30d0307ffb8c3:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T03:59:46,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:46,169 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T03:59:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40421 {}] regionserver.HRegion(8855): Flush requested on a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:46,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T03:59:46,170 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 209000 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T03:59:46,170 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1541): a528a9ff41f6621f78f30d0307ffb8c3/info is initiating minor compaction (all files) 2024-11-14T03:59:46,171 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a528a9ff41f6621f78f30d0307ffb8c3/info in TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:59:46,171 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/798fcfeb1d6a4ed38c652727a6049eac, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/dd7f69680cc2498e81b891db1ec4ef8b, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/87aaec3941b04365b8aaf81fcd31a6c3] into tmpdir=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp, totalSize=204.1 K 2024-11-14T03:59:46,171 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 798fcfeb1d6a4ed38c652727a6049eac, keycount=150, bloomtype=ROW, size=163.8 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1731556713227 2024-11-14T03:59:46,172 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting dd7f69680cc2498e81b891db1ec4ef8b, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731556784101 2024-11-14T03:59:46,172 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] compactions.Compactor(225): Compacting 87aaec3941b04365b8aaf81fcd31a6c3, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1731556786127 2024-11-14T03:59:46,173 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/b23f57a0692a4a98b04ec772fa4361a3 is 1080, key is row0241/info:/1731556786149/Put/seqid=0 2024-11-14T03:59:46,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741871_1047 (size=20092) 2024-11-14T03:59:46,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741871_1047 (size=20092) 2024-11-14T03:59:46,184 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a528a9ff41f6621f78f30d0307ffb8c3#info#compaction#85 average throughput is 61.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T03:59:46,184 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/6d7d32ec993b4c17af0d805bbeb333a7 is 1080, key is row0062/info:/1731556713227/Put/seqid=0 2024-11-14T03:59:46,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741872_1048 (size=199138) 2024-11-14T03:59:46,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741872_1048 (size=199138) 2024-11-14T03:59:46,192 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/6d7d32ec993b4c17af0d805bbeb333a7 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/6d7d32ec993b4c17af0d805bbeb333a7 2024-11-14T03:59:46,198 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a528a9ff41f6621f78f30d0307ffb8c3/info of a528a9ff41f6621f78f30d0307ffb8c3 into 6d7d32ec993b4c17af0d805bbeb333a7(size=194.5 K), total size for store is 194.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
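The "average throughput is 61.23 MB/second ... total limit is 50.00 MB/second" lines come from HBase's PressureAwareThroughputController, which meters compaction writes and sleeps the compactor when it gets ahead of the allowed rate; here it reports slept 0 time(s), presumably because these compactions are small enough to finish before the controller needs to intervene. The sketch below shows only the general shape of such a byte-rate throttle, not HBase's actual controller; the class name, chunk size, and loop are illustrative, and the 50 MB/s figure is taken from the log:

/** Illustrative byte-rate throttle: sleep whenever writes get ahead of maxBytesPerSec. */
public class RateThrottleSketch {
  private final double maxBytesPerSec;
  private final long startNanos = System.nanoTime();
  private long bytesSoFar;

  public RateThrottleSketch(double maxBytesPerSec) {
    this.maxBytesPerSec = maxBytesPerSec;
  }

  /** Account for 'len' written bytes and pause if the running average exceeds the limit. */
  public synchronized void control(long len) throws InterruptedException {
    bytesSoFar += len;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double earliestAllowedSec = bytesSoFar / maxBytesPerSec; // when these bytes "should" be done
    double sleepSec = earliestAllowedSec - elapsedSec;
    if (sleepSec > 0) {
      Thread.sleep((long) (sleepSec * 1000)); // writer falls back under the limit
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // 50 MB/s, mirroring the "total limit is 50.00 MB/second" reported in the log.
    RateThrottleSketch throttle = new RateThrottleSketch(50.0 * 1024 * 1024);
    for (int i = 0; i < 8; i++) {
      throttle.control(16L * 1024 * 1024); // pretend a 16 MB chunk of compacted data was written
    }
    System.out.println("done");
  }
}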
2024-11-14T03:59:46,198 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:46,198 INFO [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., storeName=a528a9ff41f6621f78f30d0307ffb8c3/info, priority=13, startTime=1731556786169; duration=0sec 2024-11-14T03:59:46,198 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T03:59:46,198 DEBUG [RS:0;6a989be77d39:40421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a528a9ff41f6621f78f30d0307ffb8c3:info 2024-11-14T03:59:46,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:59:46,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta after 196155ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T03:59:46,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:46,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 after 196177ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
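By this point the same two WAL files under the 62179ab1-... test-data directory have been failing lease recovery for over 196 seconds (attempt=3 after 196155ms and 196177ms), always with the same "Filesystem closed" root cause. When triaging a capture like this it helps to collapse the noise and count retries per file. A small helper using only the JDK is sketched below; the message format it matches is taken from the warnings above, while the input file name is an assumption:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Stream;

/** Count RecoverLeaseFSUtils "Failed invocation" warnings per WAL path in a captured log. */
public class LeaseRetryCounter {
  private static final Pattern FAILED =
      Pattern.compile("RecoverLeaseFSUtils\\(258\\): Failed invocation for (\\S+)");

  public static void main(String[] args) throws IOException {
    Map<String, Integer> retriesPerWal = new LinkedHashMap<>();
    // Hypothetical location of the captured test log.
    try (Stream<String> lines = Files.lines(Paths.get("test-output.log"))) {
      lines.forEach(line -> {
        Matcher m = FAILED.matcher(line);
        while (m.find()) {            // a wrapped line may hold several log entries
          retriesPerWal.merge(m.group(1), 1, Integer::sum);
        }
      });
    }
    retriesPerWal.forEach((wal, n) -> System.out.println(n + "x  " + wal));
  }
}

Run against this capture it would show both WAL paths accumulating roughly one failed attempt per second, which matches the timestamps of the warnings above.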
2024-11-14T03:59:46,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/b23f57a0692a4a98b04ec772fa4361a3 2024-11-14T03:59:46,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/b23f57a0692a4a98b04ec772fa4361a3 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/b23f57a0692a4a98b04ec772fa4361a3 2024-11-14T03:59:46,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/b23f57a0692a4a98b04ec772fa4361a3, entries=14, sequenceid=323, filesize=19.6 K 2024-11-14T03:59:46,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=2.10 KB/2152 for a528a9ff41f6621f78f30d0307ffb8c3 in 421ms, sequenceid=323, compaction requested=false 2024-11-14T03:59:46,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:47,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:47,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:59:48,173 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-14T03:59:48,173 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C40421%2C1731556688247.1731556788173 2024-11-14T03:59:48,191 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,191 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,191 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,191 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,191 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,191 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.1731556689775 with entries=308, filesize=306.93 KB; new WAL /user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.1731556788173 2024-11-14T03:59:48,192 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42019:42019),(127.0.0.1/127.0.0.1:37239:37239)] 2024-11-14T03:59:48,192 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.1731556689775 is not closed yet, will try archiving it next time 2024-11-14T03:59:48,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741833_1009 (size=314308) 2024-11-14T03:59:48,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741833_1009 (size=314308) 2024-11-14T03:59:48,196 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a528a9ff41f6621f78f30d0307ffb8c3 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-14T03:59:48,200 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/be964f9994b24d87aca0184259e1415f is 1080, key is row0255/info:/1731556786170/Put/seqid=0 2024-11-14T03:59:48,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741874_1050 (size=7116) 2024-11-14T03:59:48,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741874_1050 (size=7116) 2024-11-14T03:59:48,204 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/be964f9994b24d87aca0184259e1415f 2024-11-14T03:59:48,213 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/.tmp/info/be964f9994b24d87aca0184259e1415f as 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/be964f9994b24d87aca0184259e1415f 2024-11-14T03:59:48,218 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/be964f9994b24d87aca0184259e1415f, entries=2, sequenceid=329, filesize=6.9 K 2024-11-14T03:59:48,219 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for a528a9ff41f6621f78f30d0307ffb8c3 in 23ms, sequenceid=329, compaction requested=true 2024-11-14T03:59:48,219 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a528a9ff41f6621f78f30d0307ffb8c3: 2024-11-14T03:59:48,219 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 504095cd6401df6643acc3f612334809: 2024-11-14T03:59:48,219 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-14T03:59:48,223 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/info/4c3b2e2ad59246599f1dda2c01c56c6c is 186, key is TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809./info:regioninfo/1731556717292/Put/seqid=0 2024-11-14T03:59:48,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741875_1051 (size=6153) 2024-11-14T03:59:48,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741875_1051 (size=6153) 2024-11-14T03:59:48,227 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/info/4c3b2e2ad59246599f1dda2c01c56c6c 2024-11-14T03:59:48,234 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/.tmp/info/4c3b2e2ad59246599f1dda2c01c56c6c as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/info/4c3b2e2ad59246599f1dda2c01c56c6c 2024-11-14T03:59:48,239 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/info/4c3b2e2ad59246599f1dda2c01c56c6c, entries=5, sequenceid=21, filesize=6.0 K 2024-11-14T03:59:48,240 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-14T03:59:48,240 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T03:59:48,240 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C40421%2C1731556688247.1731556788240 2024-11-14T03:59:48,248 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,249 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,249 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,249 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,250 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,250 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.1731556788173 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.1731556788240 2024-11-14T03:59:48,251 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37239:37239),(127.0.0.1/127.0.0.1:42019:42019)] 2024-11-14T03:59:48,251 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.1731556788173 is not closed yet, will try archiving it next time 2024-11-14T03:59:48,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741873_1049 (size=731) 2024-11-14T03:59:48,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741873_1049 (size=731) 2024-11-14T03:59:48,265 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.1731556689775 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/oldWALs/6a989be77d39%2C40421%2C1731556688247.1731556689775 2024-11-14T03:59:48,266 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T03:59:48,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:48,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:59:48,658 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/WALs/6a989be77d39,40421,1731556688247/6a989be77d39%2C40421%2C1731556688247.1731556788173 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/oldWALs/6a989be77d39%2C40421%2C1731556688247.1731556788173 2024-11-14T03:59:48,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T03:59:48,667 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T03:59:48,667 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:59:48,667 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:48,667 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:48,667 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T03:59:48,668 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T03:59:48,668 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1028471360, stopped=false 2024-11-14T03:59:48,668 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6a989be77d39,46343,1731556687906 2024-11-14T03:59:48,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:59:48,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:48,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:59:48,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:48,671 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:59:48,671 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:59:48,672 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:59:48,672 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T03:59:48,672 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:59:48,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:48,672 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '6a989be77d39,40421,1731556688247' ***** 2024-11-14T03:59:48,672 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T03:59:48,673 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T03:59:48,673 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T03:59:48,673 INFO [RS:0;6a989be77d39:40421 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T03:59:48,673 INFO [RS:0;6a989be77d39:40421 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T03:59:48,674 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(3091): Received CLOSE for a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:48,674 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(3091): Received CLOSE for 504095cd6401df6643acc3f612334809 2024-11-14T03:59:48,674 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(959): stopping server 6a989be77d39,40421,1731556688247 2024-11-14T03:59:48,674 INFO [RS:0;6a989be77d39:40421 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:59:48,674 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a528a9ff41f6621f78f30d0307ffb8c3, disabling compactions & flushes 2024-11-14T03:59:48,674 INFO [RS:0;6a989be77d39:40421 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6a989be77d39:40421. 2024-11-14T03:59:48,674 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 
2024-11-14T03:59:48,674 DEBUG [RS:0;6a989be77d39:40421 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:59:48,674 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:59:48,674 DEBUG [RS:0;6a989be77d39:40421 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:48,674 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. after waiting 0 ms 2024-11-14T03:59:48,674 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:59:48,674 INFO [RS:0;6a989be77d39:40421 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T03:59:48,674 INFO [RS:0;6a989be77d39:40421 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T03:59:48,674 INFO [RS:0;6a989be77d39:40421 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T03:59:48,675 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T03:59:48,675 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c->hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/73cf57353b9e4064a5f5a88acddddad3-top, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fb88787f434f44c8a5643202c424aa37, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/TestLogRolling-testLogRolling=817cba3f933a29fea91e92b5cfc2ff1c-e1b1590619a74d7d98a33c05e94a467a, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35ca032015af4f2a98bcf3b9f34ab235, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/39366ea3a76c443e85b4e972710e07ab, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35fa5cf7fa4745d4ac300e1dde9c2d47, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/0babca58f1c14d439931df12b75c9b82, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fff0d3c999c049b0b6d15fab532b2a46, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/d57ef774bb3842ce9a2f0cb795dea69c, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/c9be0423f87a438191515055ea68d588, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/962f1bfe65b54b779e8135b44948c5fa, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/1cdd751f68324603953a00b27645f65d, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/cdc2a90cc6f64eb2bc357f1f77ba34e9, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/a1c124ba3e42479e900c7ab229405fcc, 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/7279fe52a3a74fe59e52224d4a8c1368, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73745fb2a38b41d692a31a29eb1f28f7, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/798fcfeb1d6a4ed38c652727a6049eac, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/4204ef1698c64e0db3d8844767c497d1, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/dd7f69680cc2498e81b891db1ec4ef8b, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/87aaec3941b04365b8aaf81fcd31a6c3] to archive 2024-11-14T03:59:48,679 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T03:59:48,681 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-14T03:59:48,681 DEBUG [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(1325): Online Regions={a528a9ff41f6621f78f30d0307ffb8c3=TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3., 504095cd6401df6643acc3f612334809=TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T03:59:48,681 DEBUG [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 504095cd6401df6643acc3f612334809, a528a9ff41f6621f78f30d0307ffb8c3 2024-11-14T03:59:48,681 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:59:48,681 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:59:48,682 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:59:48,682 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:59:48,682 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:59:48,689 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c to 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:59:48,695 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fb88787f434f44c8a5643202c424aa37 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fb88787f434f44c8a5643202c424aa37 2024-11-14T03:59:48,698 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/TestLogRolling-testLogRolling=817cba3f933a29fea91e92b5cfc2ff1c-e1b1590619a74d7d98a33c05e94a467a to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/TestLogRolling-testLogRolling=817cba3f933a29fea91e92b5cfc2ff1c-e1b1590619a74d7d98a33c05e94a467a 2024-11-14T03:59:48,700 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35ca032015af4f2a98bcf3b9f34ab235 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35ca032015af4f2a98bcf3b9f34ab235 2024-11-14T03:59:48,709 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/39366ea3a76c443e85b4e972710e07ab to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/39366ea3a76c443e85b4e972710e07ab 2024-11-14T03:59:48,711 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-14T03:59:48,713 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:59:48,713 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:59:48,713 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556788681Running coprocessor pre-close hooks at 1731556788681Disabling compacts and flushes for region at 1731556788681Disabling writes for close at 1731556788682 (+1 ms)Writing region close event to WAL at 1731556788687 (+5 ms)Running coprocessor post-close hooks at 1731556788713 (+26 ms)Closed at 1731556788713 2024-11-14T03:59:48,713 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T03:59:48,717 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35fa5cf7fa4745d4ac300e1dde9c2d47 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/35fa5cf7fa4745d4ac300e1dde9c2d47 2024-11-14T03:59:48,725 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/0babca58f1c14d439931df12b75c9b82 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/0babca58f1c14d439931df12b75c9b82 2024-11-14T03:59:48,727 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fff0d3c999c049b0b6d15fab532b2a46 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/fff0d3c999c049b0b6d15fab532b2a46 2024-11-14T03:59:48,729 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/d57ef774bb3842ce9a2f0cb795dea69c to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/d57ef774bb3842ce9a2f0cb795dea69c 2024-11-14T03:59:48,731 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/c9be0423f87a438191515055ea68d588 to 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/c9be0423f87a438191515055ea68d588 2024-11-14T03:59:48,732 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/962f1bfe65b54b779e8135b44948c5fa to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/962f1bfe65b54b779e8135b44948c5fa 2024-11-14T03:59:48,734 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/1cdd751f68324603953a00b27645f65d to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/1cdd751f68324603953a00b27645f65d 2024-11-14T03:59:48,736 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/cdc2a90cc6f64eb2bc357f1f77ba34e9 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/cdc2a90cc6f64eb2bc357f1f77ba34e9 2024-11-14T03:59:48,737 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/a1c124ba3e42479e900c7ab229405fcc to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/a1c124ba3e42479e900c7ab229405fcc 2024-11-14T03:59:48,739 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/7279fe52a3a74fe59e52224d4a8c1368 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/7279fe52a3a74fe59e52224d4a8c1368 2024-11-14T03:59:48,740 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73745fb2a38b41d692a31a29eb1f28f7 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/73745fb2a38b41d692a31a29eb1f28f7 2024-11-14T03:59:48,742 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/798fcfeb1d6a4ed38c652727a6049eac to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/798fcfeb1d6a4ed38c652727a6049eac 2024-11-14T03:59:48,743 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/4204ef1698c64e0db3d8844767c497d1 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/4204ef1698c64e0db3d8844767c497d1 2024-11-14T03:59:48,745 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/dd7f69680cc2498e81b891db1ec4ef8b to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/dd7f69680cc2498e81b891db1ec4ef8b 2024-11-14T03:59:48,748 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/87aaec3941b04365b8aaf81fcd31a6c3 to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/info/87aaec3941b04365b8aaf81fcd31a6c3 2024-11-14T03:59:48,749 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6a989be77d39:46343 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-14T03:59:48,750 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [fb88787f434f44c8a5643202c424aa37=32081, 35ca032015af4f2a98bcf3b9f34ab235=12509, 39366ea3a76c443e85b4e972710e07ab=47300, 35fa5cf7fa4745d4ac300e1dde9c2d47=12512, 0babca58f1c14d439931df12b75c9b82=21156, fff0d3c999c049b0b6d15fab532b2a46=78805, d57ef774bb3842ce9a2f0cb795dea69c=20078, c9be0423f87a438191515055ea68d588=22238, 962f1bfe65b54b779e8135b44948c5fa=111255, 1cdd751f68324603953a00b27645f65d=20078, cdc2a90cc6f64eb2bc357f1f77ba34e9=12516, a1c124ba3e42479e900c7ab229405fcc=143902, 7279fe52a3a74fe59e52224d4a8c1368=29784, 73745fb2a38b41d692a31a29eb1f28f7=12516, 798fcfeb1d6a4ed38c652727a6049eac=167737, 4204ef1698c64e0db3d8844767c497d1=21169, dd7f69680cc2498e81b891db1ec4ef8b=20092, 87aaec3941b04365b8aaf81fcd31a6c3=21171] 2024-11-14T03:59:48,766 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/a528a9ff41f6621f78f30d0307ffb8c3/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=110 2024-11-14T03:59:48,767 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:59:48,767 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a528a9ff41f6621f78f30d0307ffb8c3: Waiting for close lock at 1731556788674Running coprocessor pre-close hooks at 1731556788674Disabling compacts and flushes for region at 1731556788674Disabling writes for close at 1731556788674Writing region close event to WAL at 1731556788758 (+84 ms)Running coprocessor post-close hooks at 1731556788767 (+9 ms)Closed at 1731556788767 2024-11-14T03:59:48,767 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731556715486.a528a9ff41f6621f78f30d0307ffb8c3. 2024-11-14T03:59:48,767 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 504095cd6401df6643acc3f612334809, disabling compactions & flushes 2024-11-14T03:59:48,767 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 2024-11-14T03:59:48,767 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 2024-11-14T03:59:48,767 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. after waiting 0 ms 2024-11-14T03:59:48,767 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 
2024-11-14T03:59:48,768 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c->hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/817cba3f933a29fea91e92b5cfc2ff1c/info/73cf57353b9e4064a5f5a88acddddad3-bottom] to archive 2024-11-14T03:59:48,769 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T03:59:48,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c to hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/archive/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/info/73cf57353b9e4064a5f5a88acddddad3.817cba3f933a29fea91e92b5cfc2ff1c 2024-11-14T03:59:48,772 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-14T03:59:48,779 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/data/default/TestLogRolling-testLogRolling/504095cd6401df6643acc3f612334809/recovered.edits/115.seqid, newMaxSeqId=115, maxSeqId=110 2024-11-14T03:59:48,780 INFO [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 2024-11-14T03:59:48,780 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 504095cd6401df6643acc3f612334809: Waiting for close lock at 1731556788767Running coprocessor pre-close hooks at 1731556788767Disabling compacts and flushes for region at 1731556788767Disabling writes for close at 1731556788767Writing region close event to WAL at 1731556788773 (+6 ms)Running coprocessor post-close hooks at 1731556788780 (+7 ms)Closed at 1731556788780 2024-11-14T03:59:48,780 DEBUG [RS_CLOSE_REGION-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731556715486.504095cd6401df6643acc3f612334809. 2024-11-14T03:59:48,881 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(976): stopping server 6a989be77d39,40421,1731556688247; all regions closed. 
2024-11-14T03:59:48,882 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,882 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,882 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,882 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,882 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741834_1010 (size=8107) 2024-11-14T03:59:48,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741834_1010 (size=8107) 2024-11-14T03:59:48,888 DEBUG [RS:0;6a989be77d39:40421 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/oldWALs 2024-11-14T03:59:48,888 INFO [RS:0;6a989be77d39:40421 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C40421%2C1731556688247.meta:.meta(num 1731556690396) 2024-11-14T03:59:48,889 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,889 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,889 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,889 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,889 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:48,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741876_1052 (size=778) 2024-11-14T03:59:48,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741876_1052 (size=778) 2024-11-14T03:59:48,893 DEBUG [RS:0;6a989be77d39:40421 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/oldWALs 2024-11-14T03:59:48,893 INFO [RS:0;6a989be77d39:40421 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C40421%2C1731556688247:(num 1731556788240) 2024-11-14T03:59:48,893 DEBUG [RS:0;6a989be77d39:40421 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:48,893 INFO [RS:0;6a989be77d39:40421 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:59:48,893 INFO [RS:0;6a989be77d39:40421 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:59:48,893 INFO [RS:0;6a989be77d39:40421 {}] hbase.ChoreService(370): Chore service for: regionserver/6a989be77d39:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T03:59:48,893 INFO [RS:0;6a989be77d39:40421 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:59:48,894 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T03:59:48,894 INFO [RS:0;6a989be77d39:40421 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40421 2024-11-14T03:59:48,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6a989be77d39,40421,1731556688247 2024-11-14T03:59:48,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:59:48,896 INFO [RS:0;6a989be77d39:40421 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:59:48,897 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6a989be77d39,40421,1731556688247] 2024-11-14T03:59:48,898 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6a989be77d39,40421,1731556688247 already deleted, retry=false 2024-11-14T03:59:48,898 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6a989be77d39,40421,1731556688247 expired; onlineServers=0 2024-11-14T03:59:48,899 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6a989be77d39,46343,1731556687906' ***** 2024-11-14T03:59:48,899 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T03:59:48,899 INFO [M:0;6a989be77d39:46343 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:59:48,899 INFO [M:0;6a989be77d39:46343 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:59:48,899 DEBUG [M:0;6a989be77d39:46343 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T03:59:48,899 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
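The tracker messages above show the master noticing a NodeDeleted event on /hbase/rs/<server> once the regionserver's ZooKeeper session ends. The underlying pattern, an ephemeral znode plus a one-shot watch, can be sketched with the plain ZooKeeper client API; the quorum address and znode path below are placeholders, and this is a sketch of the idea, not the HBase RegionServerTracker itself:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeWatch {

    // Open a session and block until it reports SyncConnected.
    static ZooKeeper connect(String quorum) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper(quorum, 15000, event -> {
            if (event.getState() == KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();
        return zk;
    }

    public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:2181";      // placeholder quorum
        String path = "/demo-rs-server-1";     // placeholder znode (top level, no parent needed)

        ZooKeeper owner = connect(quorum);     // plays the regionserver: owns the ephemeral node
        ZooKeeper tracker = connect(quorum);   // plays the master-side tracker

        owner.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        CountDownLatch deleted = new CountDownLatch(1);
        // One-shot watch: NodeDeleted fires once the owning session goes away.
        tracker.exists(path, event -> {
            if (event.getType() == EventType.NodeDeleted) {
                System.out.println("ephemeral node deleted: " + event.getPath());
                deleted.countDown();
            }
        });

        owner.close();      // session close -> ZooKeeper removes the ephemeral znode
        deleted.await();
        tracker.close();
    }
}

Closing the owning session is enough for ZooKeeper to delete the ephemeral node and notify the watcher, which is the expiration processing logged above.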
2024-11-14T03:59:48,899 DEBUG [M:0;6a989be77d39:46343 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T03:59:48,899 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556689385 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556689385,5,FailOnTimeoutGroup] 2024-11-14T03:59:48,899 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556689390 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556689390,5,FailOnTimeoutGroup] 2024-11-14T03:59:48,899 INFO [M:0;6a989be77d39:46343 {}] hbase.ChoreService(370): Chore service for: master/6a989be77d39:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T03:59:48,899 INFO [M:0;6a989be77d39:46343 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:59:48,899 DEBUG [M:0;6a989be77d39:46343 {}] master.HMaster(1795): Stopping service threads 2024-11-14T03:59:48,900 INFO [M:0;6a989be77d39:46343 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T03:59:48,900 INFO [M:0;6a989be77d39:46343 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:59:48,900 INFO [M:0;6a989be77d39:46343 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T03:59:48,900 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T03:59:48,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T03:59:48,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:48,901 DEBUG [M:0;6a989be77d39:46343 {}] zookeeper.ZKUtil(347): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T03:59:48,901 WARN [M:0;6a989be77d39:46343 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T03:59:48,902 INFO [M:0;6a989be77d39:46343 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/.lastflushedseqids 2024-11-14T03:59:48,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741877_1053 (size=228) 2024-11-14T03:59:48,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741877_1053 (size=228) 2024-11-14T03:59:48,924 INFO [M:0;6a989be77d39:46343 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T03:59:48,924 INFO [M:0;6a989be77d39:46343 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T03:59:48,924 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:59:48,924 INFO [M:0;6a989be77d39:46343 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:48,924 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:48,924 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:59:48,924 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:48,925 INFO [M:0;6a989be77d39:46343 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-14T03:59:48,950 DEBUG [M:0;6a989be77d39:46343 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/13b30592d26544ebbc535c1dfe381cb5 is 82, key is hbase:meta,,1/info:regioninfo/1731556690559/Put/seqid=0 2024-11-14T03:59:48,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741878_1054 (size=5672) 2024-11-14T03:59:48,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741878_1054 (size=5672) 2024-11-14T03:59:48,964 INFO [M:0;6a989be77d39:46343 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/13b30592d26544ebbc535c1dfe381cb5 2024-11-14T03:59:48,996 DEBUG [M:0;6a989be77d39:46343 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c363d6cae65942d1abda1dd928141f7f is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731556691375/Put/seqid=0 2024-11-14T03:59:48,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:59:48,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40421-0x100ef6534890001, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:59:48,999 INFO [RS:0;6a989be77d39:40421 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:59:48,999 INFO [RS:0;6a989be77d39:40421 {}] regionserver.HRegionServer(1031): Exiting; stopping=6a989be77d39,40421,1731556688247; zookeeper connection closed. 
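The flush entries above write each column family's snapshot to a file under .tmp/ first; the "Committing .../.tmp/... as ..." entries a little further down then move it into the family directory. A local-filesystem analogue of that stage-then-rename pattern in plain java.nio.file (a sketch only, not the HDFS-backed HBase code; the file name is copied from the log for flavor):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommit {
    /**
     * Write content to a staging file under .tmp/ and only then move it into
     * the final store directory, so readers never see a half-written file.
     */
    static Path flushAndCommit(Path storeDir, String fileName, byte[] content) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Files.createDirectories(storeDir.resolve("info"));

        Path staged = tmpDir.resolve(fileName);
        Files.write(staged, content);                         // "Flushed ... to=.../.tmp/<file>"

        Path committed = storeDir.resolve("info").resolve(fileName);
        // "Committing .tmp/<file> as info/<file>" -- atomic where the filesystem supports it
        return Files.move(staged, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store-demo");
        Path result = flushAndCommit(storeDir, "13b30592d26544ebbc535c1dfe381cb5",
                "example cell data".getBytes(StandardCharsets.UTF_8));
        System.out.println("committed file: " + result);
    }
}

Readers only ever see the committed path, never a partially written file, which is the point of staging the flush under .tmp/.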
2024-11-14T03:59:49,002 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a8b6ec8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a8b6ec8
2024-11-14T03:59:49,004 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-14T03:59:49,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741879_1055 (size=7091)
2024-11-14T03:59:49,044 INFO [M:0;6a989be77d39:46343 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c363d6cae65942d1abda1dd928141f7f
2024-11-14T03:59:49,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741879_1055 (size=7091)
2024-11-14T03:59:49,049 INFO [M:0;6a989be77d39:46343 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c363d6cae65942d1abda1dd928141f7f
2024-11-14T03:59:49,068 DEBUG [M:0;6a989be77d39:46343 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0488e85419a2477dada4a49410f3bc4f is 69, key is 6a989be77d39,40421,1731556688247/rs:state/1731556689457/Put/seqid=0
2024-11-14T03:59:49,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741880_1056 (size=5156)
2024-11-14T03:59:49,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741880_1056 (size=5156)
2024-11-14T03:59:49,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-14T03:59:49,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-14T03:59:49,500 INFO [M:0;6a989be77d39:46343 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0488e85419a2477dada4a49410f3bc4f 2024-11-14T03:59:49,550 DEBUG [M:0;6a989be77d39:46343 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4db772bfeca741ceaf18f5dbde02065a is 52, key is load_balancer_on/state:d/1731556690753/Put/seqid=0 2024-11-14T03:59:49,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741881_1057 (size=5056) 2024-11-14T03:59:49,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741881_1057 (size=5056) 2024-11-14T03:59:49,574 INFO [M:0;6a989be77d39:46343 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4db772bfeca741ceaf18f5dbde02065a 2024-11-14T03:59:49,588 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/13b30592d26544ebbc535c1dfe381cb5 as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/13b30592d26544ebbc535c1dfe381cb5 2024-11-14T03:59:49,596 INFO [M:0;6a989be77d39:46343 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/13b30592d26544ebbc535c1dfe381cb5, entries=8, sequenceid=125, filesize=5.5 K 2024-11-14T03:59:49,598 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c363d6cae65942d1abda1dd928141f7f as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c363d6cae65942d1abda1dd928141f7f 2024-11-14T03:59:49,615 INFO [M:0;6a989be77d39:46343 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c363d6cae65942d1abda1dd928141f7f 2024-11-14T03:59:49,616 INFO [M:0;6a989be77d39:46343 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c363d6cae65942d1abda1dd928141f7f, entries=13, sequenceid=125, filesize=6.9 K 2024-11-14T03:59:49,617 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0488e85419a2477dada4a49410f3bc4f as 
hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0488e85419a2477dada4a49410f3bc4f 2024-11-14T03:59:49,619 INFO [regionserver/6a989be77d39:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:59:49,624 INFO [M:0;6a989be77d39:46343 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0488e85419a2477dada4a49410f3bc4f, entries=1, sequenceid=125, filesize=5.0 K 2024-11-14T03:59:49,634 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4db772bfeca741ceaf18f5dbde02065a as hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4db772bfeca741ceaf18f5dbde02065a 2024-11-14T03:59:49,646 INFO [M:0;6a989be77d39:46343 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38151/user/jenkins/test-data/184da31e-3df3-75f9-c2be-22883cb66454/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4db772bfeca741ceaf18f5dbde02065a, entries=1, sequenceid=125, filesize=4.9 K 2024-11-14T03:59:49,649 INFO [M:0;6a989be77d39:46343 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 725ms, sequenceid=125, compaction requested=false 2024-11-14T03:59:49,698 INFO [M:0;6a989be77d39:46343 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:49,698 DEBUG [M:0;6a989be77d39:46343 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556788924Disabling compacts and flushes for region at 1731556788924Disabling writes for close at 1731556788924Obtaining lock to block concurrent updates at 1731556788925 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731556788925Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731556788925Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731556788926 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731556788926Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731556788949 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731556788950 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731556788970 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731556788995 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731556788995Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731556789049 (+54 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731556789067 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731556789067Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731556789507 (+440 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731556789542 (+35 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731556789542Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16cb0e21: reopening flushed file at 1731556789583 (+41 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3266282c: reopening flushed file at 1731556789597 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@de6bbf3: reopening flushed file at 1731556789616 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61bf619c: reopening flushed file at 1731556789625 (+9 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 725ms, sequenceid=125, compaction requested=false at 1731556789649 (+24 ms)Writing region close event to WAL at 1731556789697 (+48 ms)Closed at 1731556789697 2024-11-14T03:59:49,711 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:49,717 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:49,717 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:49,718 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:49,718 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:49,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40871 is added to blk_1073741830_1006 (size=61332) 2024-11-14T03:59:49,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38303 is added to blk_1073741830_1006 (size=61332) 2024-11-14T03:59:49,752 INFO [M:0;6a989be77d39:46343 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T03:59:49,752 INFO [M:0;6a989be77d39:46343 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46343 2024-11-14T03:59:49,752 INFO [M:0;6a989be77d39:46343 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:59:49,753 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
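The "Finished flush" summary above gives both the cell data size (52663 bytes), the on-heap size (64816 bytes), and the elapsed time (725 ms). A couple of lines of arithmetic, as a sketch, turn those into an overhead factor and a throughput figure:

public class FlushStats {
    public static void main(String[] args) {
        // Numbers copied from the "Finished flush" entry above.
        long dataSizeBytes = 52_663;   // ~51.43 KB of cell data
        long heapSizeBytes = 64_816;   // ~63.30 KB on-heap footprint
        long flushMillis   = 725;

        double overhead = (double) heapSizeBytes / dataSizeBytes;
        double kibPerSec = dataSizeBytes / 1024.0 / (flushMillis / 1000.0);

        System.out.printf("heap overhead factor: %.2fx%n", overhead);
        System.out.printf("flush throughput:     %.1f KiB/s%n", kibPerSec);
    }
}

That works out to roughly a 1.23x heap overhead and about 71 KiB/s for this small master-store flush.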
2024-11-14T03:59:49,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:59:49,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46343-0x100ef6534890000, quorum=127.0.0.1:64410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:59:49,859 INFO [M:0;6a989be77d39:46343 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:59:49,907 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f25c1cb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:59:49,911 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cfa2328{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:59:49,912 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:59:49,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@652d6e37{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:59:49,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43909889{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.log.dir/,STOPPED} 2024-11-14T03:59:49,926 WARN [BP-1496562693-172.17.0.2-1731556686657 heartbeating to localhost/127.0.0.1:38151 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:59:49,926 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:59:49,926 WARN [BP-1496562693-172.17.0.2-1731556686657 heartbeating to localhost/127.0.0.1:38151 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1496562693-172.17.0.2-1731556686657 (Datanode Uuid df3874e3-3dfe-49c6-b711-a17e6716c3c8) service to localhost/127.0.0.1:38151 2024-11-14T03:59:49,926 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:59:49,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/data/data3/current/BP-1496562693-172.17.0.2-1731556686657 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:59:49,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/data/data4/current/BP-1496562693-172.17.0.2-1731556686657 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:59:49,927 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:59:49,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d6e0cf8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:59:49,943 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34e466bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:59:49,943 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:59:49,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79b422de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:59:49,943 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cb7e6ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.log.dir/,STOPPED} 2024-11-14T03:59:49,948 WARN [BP-1496562693-172.17.0.2-1731556686657 heartbeating to localhost/127.0.0.1:38151 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:59:49,948 WARN [BP-1496562693-172.17.0.2-1731556686657 heartbeating to localhost/127.0.0.1:38151 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1496562693-172.17.0.2-1731556686657 (Datanode Uuid ea66442f-9a68-49e2-ab4e-c98b8063e361) service to localhost/127.0.0.1:38151 2024-11-14T03:59:49,949 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
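A few entries below, hbase.ResourceChecker prints an after-test report that appears to diff the thread set against the before-test snapshot and lists the leftovers as "Potentially hanging thread". The before/after idea itself is simple to sketch in plain Java (the leaked thread here is a stand-in, not an HBase thread):

import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

public class ThreadLeakCheck {
    /** Snapshot the names of all live threads. */
    static Set<String> liveThreadNames() {
        return Thread.getAllStackTraces().keySet().stream()
                .map(Thread::getName)
                .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        Set<String> before = liveThreadNames();

        // Stand-in for "the test": start a thread that outlives the test body.
        Thread leaked = new Thread(() -> {
            try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
        }, "demo-leaked-thread");
        leaked.setDaemon(true);
        leaked.start();

        Set<String> after = new HashSet<>(liveThreadNames());
        after.removeAll(before);
        // Anything left over is "potentially hanging", in the spirit of the report below.
        after.forEach(name -> System.out.println("Potentially hanging thread: " + name));
    }
}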
2024-11-14T03:59:49,949 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:59:49,949 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/data/data2/current/BP-1496562693-172.17.0.2-1731556686657 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:59:49,949 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/cluster_b2779afe-083d-1fbc-0d5e-ecfec2814715/data/data1/current/BP-1496562693-172.17.0.2-1731556686657 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:59:49,955 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:59:49,985 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a0ef025{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:59:49,987 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47a053ae{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:59:49,987 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:59:49,987 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28903d6d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:59:49,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11e6950{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.log.dir/,STOPPED} 2024-11-14T03:59:50,022 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T03:59:50,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T03:59:50,105 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=228 (was 207) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38151 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38151 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38151 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:38151 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:38151 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:38151 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38151 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:38151 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=506 (was 481) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=601 (was 573) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5050 (was 4640) - AvailableMemoryMB LEAK? 
- 2024-11-14T03:59:50,126 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=228, OpenFileDescriptor=506, MaxFileDescriptor=1048576, SystemLoadAverage=601, ProcessCount=11, AvailableMemoryMB=5049 2024-11-14T03:59:50,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T03:59:50,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.log.dir so I do NOT create it in target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c 2024-11-14T03:59:50,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fac4be86-6ada-ea61-689f-4adf84ff9243/hadoop.tmp.dir so I do NOT create it in target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c 2024-11-14T03:59:50,127 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550, deleteOnExit=true 2024-11-14T03:59:50,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T03:59:50,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/test.cache.data in system properties and HBase conf 2024-11-14T03:59:50,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T03:59:50,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/hadoop.log.dir in system properties and HBase conf 2024-11-14T03:59:50,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T03:59:50,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T03:59:50,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T03:59:50,128 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T03:59:50,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:59:50,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T03:59:50,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T03:59:50,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:59:50,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T03:59:50,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T03:59:50,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T03:59:50,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:59:50,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T03:59:50,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/nfs.dump.dir in system properties and HBase conf 2024-11-14T03:59:50,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/java.io.tmpdir in system properties and HBase conf 2024-11-14T03:59:50,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T03:59:50,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T03:59:50,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T03:59:50,152 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:59:50,291 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:59:50,302 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:59:50,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:50,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:59:50,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:59:50,330 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:59:50,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:50,335 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:59:50,337 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34b79769{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:59:50,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34f5471c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:59:50,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1802ed83{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/java.io.tmpdir/jetty-localhost-43807-hadoop-hdfs-3_4_1-tests_jar-_-any-9873273303182637830/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:59:50,511 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60c97719{HTTP/1.1, (http/1.1)}{localhost:43807} 2024-11-14T03:59:50,511 INFO [Time-limited test {}] server.Server(415): Started @346523ms 2024-11-14T03:59:50,534 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T03:59:50,733 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:59:50,746 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:59:50,783 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:59:50,783 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:59:50,783 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T03:59:50,786 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444363ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:59:50,786 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186dc28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:59:50,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:59:50,874 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T03:59:50,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T03:59:50,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-14T03:59:50,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b01e479{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/java.io.tmpdir/jetty-localhost-46383-hadoop-hdfs-3_4_1-tests_jar-_-any-10164166909237247713/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:59:50,978 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@483384c3{HTTP/1.1, (http/1.1)}{localhost:46383} 2024-11-14T03:59:50,978 INFO [Time-limited test {}] server.Server(415): Started @346990ms 2024-11-14T03:59:50,979 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:59:51,217 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T03:59:51,228 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T03:59:51,242 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T03:59:51,242 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T03:59:51,242 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T03:59:51,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@349fe29e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/hadoop.log.dir/,AVAILABLE} 2024-11-14T03:59:51,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10e33487{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T03:59:51,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:51,330 WARN [Thread-2455 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/data/data1/current/BP-52747132-172.17.0.2-1731556790168/current, will proceed with Du for space computation calculation, 2024-11-14T03:59:51,331 WARN [Thread-2456 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/data/data2/current/BP-52747132-172.17.0.2-1731556790168/current, will proceed with Du for space computation calculation, 2024-11-14T03:59:51,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:51,466 WARN [Thread-2434 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:59:51,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9268180{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/java.io.tmpdir/jetty-localhost-36529-hadoop-hdfs-3_4_1-tests_jar-_-any-7204867718020703881/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:59:51,470 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@372f7d77{HTTP/1.1, (http/1.1)}{localhost:36529} 2024-11-14T03:59:51,470 INFO [Time-limited test {}] server.Server(415): Started @347482ms 2024-11-14T03:59:51,472 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T03:59:51,487 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x466fd2c4291faa2e with lease ID 0x9365e9f22ab7a461: Processing first storage report for DS-46d86a3e-a63d-4740-b164-3025585876ba from datanode DatanodeRegistration(127.0.0.1:44059, datanodeUuid=56cb16dc-553f-41e4-8ed8-2f79c089d510, infoPort=35207, infoSecurePort=0, ipcPort=42431, storageInfo=lv=-57;cid=testClusterID;nsid=1415863539;c=1731556790168) 2024-11-14T03:59:51,487 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x466fd2c4291faa2e with lease ID 0x9365e9f22ab7a461: from storage DS-46d86a3e-a63d-4740-b164-3025585876ba node DatanodeRegistration(127.0.0.1:44059, datanodeUuid=56cb16dc-553f-41e4-8ed8-2f79c089d510, infoPort=35207, infoSecurePort=0, ipcPort=42431, storageInfo=lv=-57;cid=testClusterID;nsid=1415863539;c=1731556790168), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:59:51,489 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x466fd2c4291faa2e with lease ID 0x9365e9f22ab7a461: Processing first storage report for DS-5c121761-1d4d-4ed3-be95-b1111300124c from datanode DatanodeRegistration(127.0.0.1:44059, datanodeUuid=56cb16dc-553f-41e4-8ed8-2f79c089d510, infoPort=35207, infoSecurePort=0, ipcPort=42431, storageInfo=lv=-57;cid=testClusterID;nsid=1415863539;c=1731556790168) 2024-11-14T03:59:51,489 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x466fd2c4291faa2e with lease ID 0x9365e9f22ab7a461: from storage DS-5c121761-1d4d-4ed3-be95-b1111300124c node DatanodeRegistration(127.0.0.1:44059, datanodeUuid=56cb16dc-553f-41e4-8ed8-2f79c089d510, infoPort=35207, infoSecurePort=0, ipcPort=42431, storageInfo=lv=-57;cid=testClusterID;nsid=1415863539;c=1731556790168), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:59:52,256 WARN [Thread-2481 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/data/data3/current/BP-52747132-172.17.0.2-1731556790168/current, will proceed with Du for space computation calculation, 2024-11-14T03:59:52,256 WARN [Thread-2482 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/data/data4/current/BP-52747132-172.17.0.2-1731556790168/current, will proceed with Du for space computation calculation, 2024-11-14T03:59:52,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:52,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:52,373 WARN [Thread-2470 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T03:59:52,394 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x46bd19e12913b772 with lease ID 0x9365e9f22ab7a462: Processing first storage report for DS-3a5c08ae-dc48-456d-8914-e28824e5ee8e from datanode DatanodeRegistration(127.0.0.1:36647, datanodeUuid=b922e687-ca70-43ea-a736-9905644e2286, infoPort=44441, infoSecurePort=0, ipcPort=45653, storageInfo=lv=-57;cid=testClusterID;nsid=1415863539;c=1731556790168) 2024-11-14T03:59:52,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x46bd19e12913b772 with lease ID 0x9365e9f22ab7a462: from storage DS-3a5c08ae-dc48-456d-8914-e28824e5ee8e node DatanodeRegistration(127.0.0.1:36647, datanodeUuid=b922e687-ca70-43ea-a736-9905644e2286, infoPort=44441, infoSecurePort=0, ipcPort=45653, storageInfo=lv=-57;cid=testClusterID;nsid=1415863539;c=1731556790168), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:59:52,394 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x46bd19e12913b772 with lease ID 0x9365e9f22ab7a462: Processing first storage report for DS-e3398774-a21d-4c76-bb58-9b4b0bab7f74 from datanode DatanodeRegistration(127.0.0.1:36647, datanodeUuid=b922e687-ca70-43ea-a736-9905644e2286, infoPort=44441, infoSecurePort=0, ipcPort=45653, storageInfo=lv=-57;cid=testClusterID;nsid=1415863539;c=1731556790168) 2024-11-14T03:59:52,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x46bd19e12913b772 with lease ID 0x9365e9f22ab7a462: from storage DS-e3398774-a21d-4c76-bb58-9b4b0bab7f74 node DatanodeRegistration(127.0.0.1:36647, datanodeUuid=b922e687-ca70-43ea-a736-9905644e2286, infoPort=44441, infoSecurePort=0, ipcPort=45653, storageInfo=lv=-57;cid=testClusterID;nsid=1415863539;c=1731556790168), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T03:59:52,458 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c 2024-11-14T03:59:52,487 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/zookeeper_0, clientPort=60098, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T03:59:52,495 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60098 2024-11-14T03:59:52,495 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:59:52,497 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:59:52,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:59:52,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741825_1001 (size=7) 2024-11-14T03:59:52,594 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4 with version=8 2024-11-14T03:59:52,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45187/user/jenkins/test-data/bda90a20-bdd5-6fe4-f894-30e70f9bde4d/hbase-staging 2024-11-14T03:59:52,597 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:59:52,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:59:52,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:59:52,597 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:59:52,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:59:52,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:59:52,598 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T03:59:52,598 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:59:52,599 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42509 2024-11-14T03:59:52,600 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42509 connecting to ZooKeeper ensemble=127.0.0.1:60098 2024-11-14T03:59:52,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:425090x0, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:59:52,637 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42509-0x100ef66cd450000 connected 2024-11-14T03:59:52,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:59:52,748 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:59:52,756 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:59:52,757 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4, hbase.cluster.distributed=false 2024-11-14T03:59:52,761 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:59:52,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42509 2024-11-14T03:59:52,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42509 2024-11-14T03:59:52,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42509 2024-11-14T03:59:52,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42509 2024-11-14T03:59:52,825 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42509 2024-11-14T03:59:52,851 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6a989be77d39:0 server-side Connection retries=45 2024-11-14T03:59:52,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:59:52,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T03:59:52,851 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T03:59:52,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T03:59:52,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T03:59:52,852 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T03:59:52,852 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T03:59:52,866 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33623 2024-11-14T03:59:52,867 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33623 connecting to ZooKeeper ensemble=127.0.0.1:60098 2024-11-14T03:59:52,868 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:59:52,872 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:59:52,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336230x0, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T03:59:52,909 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:336230x0, quorum=127.0.0.1:60098, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:59:52,909 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T03:59:52,913 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33623-0x100ef66cd450001 connected 2024-11-14T03:59:52,926 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T03:59:52,927 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T03:59:52,929 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T03:59:52,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33623 2024-11-14T03:59:52,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33623 2024-11-14T03:59:52,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33623 2024-11-14T03:59:52,969 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33623 2024-11-14T03:59:52,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33623 2024-11-14T03:59:53,020 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6a989be77d39:42509 2024-11-14T03:59:53,037 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6a989be77d39,42509,1731556792597 2024-11-14T03:59:53,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:59:53,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:59:53,053 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/6a989be77d39,42509,1731556792597 2024-11-14T03:59:53,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T03:59:53,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,066 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T03:59:53,079 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6a989be77d39,42509,1731556792597 from backup master directory 2024-11-14T03:59:53,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:59:53,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6a989be77d39,42509,1731556792597 2024-11-14T03:59:53,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T03:59:53,084 WARN [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
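[editor's note: the master-election entries above show watchers being set on znodes such as /hbase/master and /hbase/backup-masters and the resulting NodeCreated/NodeDeleted/NodeChildrenChanged events. The following is a minimal sketch of that watch mechanism using only the plain Apache ZooKeeper client, not HBase's RecoverableZooKeeper/ZKUtil wrappers; the ensemble address 127.0.0.1:60098 and the znode paths are copied from the log, everything else (class name, timings) is illustrative.]

// Minimal sketch, assuming a reachable ZooKeeper ensemble at the address from the log.
import java.util.List;
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);

    // Watcher that logs every event, mirroring the ZKWatcher "Received ZooKeeper Event" lines.
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected
          && event.getType() == Watcher.Event.EventType.None) {
        connected.countDown();
      }
      System.out.println("Received ZooKeeper Event, type=" + event.getType()
          + ", state=" + event.getState() + ", path=" + event.getPath());
    };

    // 30s session timeout roughly matches the mini-cluster defaults seen earlier in the log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60098", 30_000, watcher);
    connected.await();

    // exists() registers a watch even when the znode is absent, which is what
    // "Set watcher on znode that does not yet exist, /hbase/master" amounts to:
    // a later create of /hbase/master fires a NodeCreated event on this session.
    zk.exists("/hbase/master", true);

    // Watching the children of /hbase/backup-masters yields NodeChildrenChanged events
    // as backup masters register and the newly active master deletes its own entry.
    List<String> backups = zk.getChildren("/hbase/backup-masters", true);
    System.out.println("current backup masters: " + backups);

    Thread.sleep(60_000);   // keep the session alive long enough to observe events
    zk.close();
  }
}

[editor's note: HBase's RecoverableZooKeeper/ZKUtil classes seen in the log add retry and bookkeeping on top of this raw client; the sketch deliberately leaves that out.]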
2024-11-14T03:59:53,084 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6a989be77d39,42509,1731556792597 2024-11-14T03:59:53,101 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/hbase.id] with ID: 7e260771-6820-47b3-b13f-ff0528a7a8d5 2024-11-14T03:59:53,102 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/.tmp/hbase.id 2024-11-14T03:59:53,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:59:53,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741826_1002 (size=42) 2024-11-14T03:59:53,178 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/.tmp/hbase.id]:[hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/hbase.id] 2024-11-14T03:59:53,224 INFO [master/6a989be77d39:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:59:53,224 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T03:59:53,227 INFO [master/6a989be77d39:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 2024-11-14T03:59:53,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:53,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:53,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:59:53,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741827_1003 (size=196) 2024-11-14T03:59:53,355 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T03:59:53,356 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T03:59:53,357 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:59:53,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:59:53,389 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store 2024-11-14T03:59:53,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741828_1004 (size=1189) 2024-11-14T03:59:53,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:59:53,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741829_1005 (size=34) 2024-11-14T03:59:53,448 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:59:53,448 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:59:53,448 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:53,448 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:53,448 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:59:53,448 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:53,448 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T03:59:53,448 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556793448Disabling compacts and flushes for region at 1731556793448Disabling writes for close at 1731556793448Writing region close event to WAL at 1731556793448Closed at 1731556793448 2024-11-14T03:59:53,450 WARN [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/.initializing 2024-11-14T03:59:53,450 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/WALs/6a989be77d39,42509,1731556792597 2024-11-14T03:59:53,455 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C42509%2C1731556792597, suffix=, logDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/WALs/6a989be77d39,42509,1731556792597, archiveDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/oldWALs, maxLogs=10 2024-11-14T03:59:53,455 INFO [master/6a989be77d39:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C42509%2C1731556792597.1731556793455 2024-11-14T03:59:53,503 INFO [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/WALs/6a989be77d39,42509,1731556792597/6a989be77d39%2C42509%2C1731556792597.1731556793455 2024-11-14T03:59:53,526 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44441:44441),(127.0.0.1/127.0.0.1:35207:35207)] 2024-11-14T03:59:53,534 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:59:53,534 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:59:53,534 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,534 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,550 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,560 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T03:59:53,560 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:53,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:59:53,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T03:59:53,564 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:53,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:59:53,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,566 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T03:59:53,566 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:53,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:59:53,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T03:59:53,576 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:53,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T03:59:53,577 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,578 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,579 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,582 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,582 DEBUG [master/6a989be77d39:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,582 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T03:59:53,584 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T03:59:53,596 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:59:53,597 INFO [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698180, jitterRate=-0.11221872270107269}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T03:59:53,598 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731556793535Initializing all the Stores at 1731556793537 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556793537Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556793550 (+13 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556793550Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556793550Cleaning up temporary data from old regions at 1731556793582 (+32 ms)Region opened successfully at 1731556793598 (+16 ms) 2024-11-14T03:59:53,605 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T03:59:53,618 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@266c43f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:59:53,619 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T03:59:53,619 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T03:59:53,619 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T03:59:53,620 INFO [master/6a989be77d39:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T03:59:53,620 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T03:59:53,622 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 1 msec 2024-11-14T03:59:53,622 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T03:59:53,638 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T03:59:53,639 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T03:59:53,641 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T03:59:53,642 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T03:59:53,643 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T03:59:53,644 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T03:59:53,645 INFO [master/6a989be77d39:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T03:59:53,658 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T03:59:53,661 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T03:59:53,662 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T03:59:53,665 DEBUG 
[master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T03:59:53,667 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T03:59:53,669 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T03:59:53,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:59:53,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T03:59:53,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,672 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6a989be77d39,42509,1731556792597, sessionid=0x100ef66cd450000, setting cluster-up flag (Was=false) 2024-11-14T03:59:53,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,684 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T03:59:53,687 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,42509,1731556792597 2024-11-14T03:59:53,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:53,708 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T03:59:53,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while 
metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,717 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a989be77d39,42509,1731556792597 2024-11-14T03:59:53,730 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T03:59:53,739 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T03:59:53,739 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T03:59:53,740 INFO [master/6a989be77d39:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-14T03:59:53,740 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6a989be77d39,42509,1731556792597 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T03:59:53,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,752 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:59:53,752 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:59:53,752 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:59:53,752 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6a989be77d39:0, corePoolSize=5, maxPoolSize=5 2024-11-14T03:59:53,752 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6a989be77d39:0, corePoolSize=10, maxPoolSize=10 2024-11-14T03:59:53,752 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:53,752 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:59:53,752 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:53,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:53,782 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:59:53,782 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T03:59:53,784 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:53,784 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T03:59:53,824 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731556823824 2024-11-14T03:59:53,824 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T03:59:53,824 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T03:59:53,824 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T03:59:53,824 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T03:59:53,824 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T03:59:53,824 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T03:59:53,849 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:53,851 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T03:59:53,851 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T03:59:53,851 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T03:59:53,874 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(746): ClusterId : 7e260771-6820-47b3-b13f-ff0528a7a8d5 2024-11-14T03:59:53,874 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T03:59:53,878 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T03:59:53,878 INFO [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T03:59:53,878 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556793878,5,FailOnTimeoutGroup] 2024-11-14T03:59:53,885 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556793878,5,FailOnTimeoutGroup] 2024-11-14T03:59:53,885 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:53,885 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T03:59:53,885 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:53,885 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-14T03:59:53,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:59:53,887 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T03:59:53,887 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T03:59:53,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741831_1007 (size=1321) 2024-11-14T03:59:53,892 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T03:59:53,892 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4 2024-11-14T03:59:53,906 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T03:59:53,906 DEBUG [RS:0;6a989be77d39:33623 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cfc6e81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a989be77d39/172.17.0.2:0 2024-11-14T03:59:53,947 DEBUG [RS:0;6a989be77d39:33623 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6a989be77d39:33623 2024-11-14T03:59:53,947 INFO [RS:0;6a989be77d39:33623 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T03:59:53,947 INFO [RS:0;6a989be77d39:33623 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T03:59:53,947 DEBUG [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T03:59:53,957 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(2659): reportForDuty to master=6a989be77d39,42509,1731556792597 with port=33623, startcode=1731556792851 2024-11-14T03:59:53,958 DEBUG [RS:0;6a989be77d39:33623 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T03:59:53,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:59:53,970 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37281, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T03:59:53,971 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42509 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6a989be77d39,33623,1731556792851 2024-11-14T03:59:53,971 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42509 {}] master.ServerManager(517): Registering regionserver=6a989be77d39,33623,1731556792851 2024-11-14T03:59:53,973 DEBUG [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4 2024-11-14T03:59:53,973 DEBUG [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40587 2024-11-14T03:59:53,973 DEBUG [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T03:59:53,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741832_1008 (size=32) 2024-11-14T03:59:53,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:59:53,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:59:53,985 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6a989be77d39,33623,1731556792851] 2024-11-14T03:59:53,985 DEBUG [RS:0;6a989be77d39:33623 {}] zookeeper.ZKUtil(111): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6a989be77d39,33623,1731556792851 2024-11-14T03:59:53,985 WARN [RS:0;6a989be77d39:33623 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T03:59:53,985 INFO [RS:0;6a989be77d39:33623 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:59:53,985 DEBUG [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/6a989be77d39,33623,1731556792851 2024-11-14T03:59:53,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:59:53,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:59:53,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:53,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:59:53,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:59:53,991 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:59:53,991 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:53,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:59:53,991 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:59:53,997 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:59:53,998 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:53,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:59:53,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:59:54,000 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:59:54,000 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:54,001 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:59:54,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:59:54,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740 2024-11-14T03:59:54,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740 2024-11-14T03:59:54,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:59:54,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:59:54,004 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:59:54,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:59:54,017 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T03:59:54,021 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797130, jitterRate=0.013603419065475464}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:59:54,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731556793975Initializing all the Stores at 1731556793985 (+10 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556793985Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556793986 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556793986Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556793986Cleaning up temporary data from old regions at 1731556794003 (+17 ms)Region opened successfully at 1731556794075 (+72 ms) 2024-11-14T03:59:54,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:59:54,075 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:59:54,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:59:54,075 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:59:54,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:59:54,077 INFO [RS:0;6a989be77d39:33623 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T03:59:54,081 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:59:54,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556794075Disabling compacts and flushes for region at 1731556794075Disabling writes for close at 1731556794075Writing region close event to WAL at 1731556794081 (+6 ms)Closed at 1731556794081 2024-11-14T03:59:54,089 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:59:54,089 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T03:59:54,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T03:59:54,094 INFO [RS:0;6a989be77d39:33623 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T03:59:54,095 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:59:54,096 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T03:59:54,101 INFO [RS:0;6a989be77d39:33623 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T03:59:54,101 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,102 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T03:59:54,109 INFO [RS:0;6a989be77d39:33623 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T03:59:54,109 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6a989be77d39:0, corePoolSize=2, maxPoolSize=2 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6a989be77d39:0, corePoolSize=1, maxPoolSize=1 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:59:54,110 DEBUG [RS:0;6a989be77d39:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6a989be77d39:0, corePoolSize=3, maxPoolSize=3 2024-11-14T03:59:54,125 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,125 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,125 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,125 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-14T03:59:54,125 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,125 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,33623,1731556792851-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:59:54,171 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T03:59:54,172 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,33623,1731556792851-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,172 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,172 INFO [RS:0;6a989be77d39:33623 {}] regionserver.Replication(171): 6a989be77d39,33623,1731556792851 started 2024-11-14T03:59:54,194 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,194 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(1482): Serving as 6a989be77d39,33623,1731556792851, RpcServer on 6a989be77d39/172.17.0.2:33623, sessionid=0x100ef66cd450001 2024-11-14T03:59:54,194 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T03:59:54,194 DEBUG [RS:0;6a989be77d39:33623 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6a989be77d39,33623,1731556792851 2024-11-14T03:59:54,194 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,33623,1731556792851' 2024-11-14T03:59:54,194 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T03:59:54,198 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T03:59:54,208 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T03:59:54,208 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T03:59:54,208 DEBUG [RS:0;6a989be77d39:33623 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6a989be77d39,33623,1731556792851 2024-11-14T03:59:54,208 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a989be77d39,33623,1731556792851' 2024-11-14T03:59:54,208 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T03:59:54,211 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T03:59:54,212 DEBUG [RS:0;6a989be77d39:33623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T03:59:54,212 INFO [RS:0;6a989be77d39:33623 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T03:59:54,212 INFO [RS:0;6a989be77d39:33623 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T03:59:54,247 WARN [6a989be77d39:42509 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T03:59:54,271 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T03:59:54,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:54,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:54,346 INFO [RS:0;6a989be77d39:33623 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C33623%2C1731556792851, suffix=, logDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/6a989be77d39,33623,1731556792851, archiveDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/oldWALs, maxLogs=32 2024-11-14T03:59:54,347 INFO [RS:0;6a989be77d39:33623 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C33623%2C1731556792851.1731556794347 2024-11-14T03:59:54,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T03:59:54,437 INFO [RS:0;6a989be77d39:33623 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/6a989be77d39,33623,1731556792851/6a989be77d39%2C33623%2C1731556792851.1731556794347 2024-11-14T03:59:54,449 DEBUG [RS:0;6a989be77d39:33623 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44441:44441),(127.0.0.1/127.0.0.1:35207:35207)] 2024-11-14T03:59:54,497 DEBUG [6a989be77d39:42509 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T03:59:54,498 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6a989be77d39,33623,1731556792851 2024-11-14T03:59:54,503 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,33623,1731556792851, state=OPENING 2024-11-14T03:59:54,509 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T03:59:54,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:54,512 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:59:54,519 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T03:59:54,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,33623,1731556792851}] 2024-11-14T03:59:54,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:54,521 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:59:54,678 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T03:59:54,690 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53661, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T03:59:54,709 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T03:59:54,710 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:59:54,737 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a989be77d39%2C33623%2C1731556792851.meta, 
suffix=.meta, logDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/6a989be77d39,33623,1731556792851, archiveDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/oldWALs, maxLogs=32 2024-11-14T03:59:54,738 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6a989be77d39%2C33623%2C1731556792851.meta.1731556794738.meta 2024-11-14T03:59:54,810 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/6a989be77d39,33623,1731556792851/6a989be77d39%2C33623%2C1731556792851.meta.1731556794738.meta 2024-11-14T03:59:54,824 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44441:44441),(127.0.0.1/127.0.0.1:35207:35207)] 2024-11-14T03:59:54,829 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T03:59:54,830 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T03:59:54,830 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T03:59:54,830 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
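The meta region open above reports loading org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from the table descriptor of hbase:meta. As a rough illustration of how that kind of endpoint gets attached to a descriptor in client code, here is a minimal sketch; the table name "demo", the single "info" family, and the standalone main() wrapper are assumptions for illustration, not part of this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CoprocessorTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // "demo" is a hypothetical table; hbase:meta carries this endpoint out of the box.
      TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
          // Same endpoint class the RegionCoprocessorHost line above reports loading.
          .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint");
      admin.createTable(tdb.build());
    }
  }
}
```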
2024-11-14T03:59:54,830 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T03:59:54,830 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T03:59:54,830 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T03:59:54,830 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T03:59:54,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T03:59:54,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T03:59:54,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:54,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:59:54,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T03:59:54,841 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T03:59:54,841 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:54,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:59:54,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T03:59:54,843 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T03:59:54,843 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:54,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T03:59:54,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T03:59:54,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T03:59:54,846 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T03:59:54,853 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
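The StoreOpener entries above spell out the per-family settings the meta stores come up with: ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, three versions. A short sketch of declaring a family with comparable settings through ColumnFamilyDescriptorBuilder; applying this to a user table, and the family name, are assumptions for illustration only.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilySketch {
  // Builds a family descriptor mirroring what the region open journal prints
  // for the 'info' family of hbase:meta in the log above.
  public static ColumnFamilyDescriptor metaLikeFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .build();
  }
}
```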
2024-11-14T03:59:54,854 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T03:59:54,856 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740 2024-11-14T03:59:54,870 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740 2024-11-14T03:59:54,872 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T03:59:54,872 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T03:59:54,872 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T03:59:54,875 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T03:59:54,876 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855658, jitterRate=0.08802665770053864}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T03:59:54,876 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T03:59:54,877 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731556794831Writing region info on filesystem at 1731556794831Initializing all the Stores at 1731556794832 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556794832Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556794833 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731556794833Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731556794833Cleaning up temporary data from old regions at 1731556794872 (+39 ms)Running coprocessor post-open hooks at 1731556794876 (+4 ms)Region opened successfully at 1731556794877 (+1 ms) 2024-11-14T03:59:54,884 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731556794677 2024-11-14T03:59:54,887 DEBUG [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T03:59:54,887 INFO [RS_OPEN_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T03:59:54,887 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6a989be77d39,33623,1731556792851 2024-11-14T03:59:54,889 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a989be77d39,33623,1731556792851, state=OPEN 2024-11-14T03:59:54,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:59:54,894 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:59:54,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T03:59:54,895 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6a989be77d39,33623,1731556792851 2024-11-14T03:59:54,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T03:59:54,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T03:59:54,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6a989be77d39,33623,1731556792851 in 376 msec 2024-11-14T03:59:54,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T03:59:54,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 809 msec 2024-11-14T03:59:54,903 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T03:59:54,903 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T03:59:54,914 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:59:54,914 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,33623,1731556792851, seqNum=-1] 2024-11-14T03:59:54,914 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:59:54,916 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37869, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:59:54,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1870 sec 2024-11-14T03:59:54,922 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731556794922, completionTime=-1 2024-11-14T03:59:54,923 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T03:59:54,923 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T03:59:54,925 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T03:59:54,925 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731556854925 2024-11-14T03:59:54,925 INFO [master/6a989be77d39:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731556914925 2024-11-14T03:59:54,925 INFO [master/6a989be77d39:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T03:59:54,925 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,42509,1731556792597-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,925 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,42509,1731556792597-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,925 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,42509,1731556792597-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,926 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6a989be77d39:42509, period=300000, unit=MILLISECONDS is enabled. 
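Many of the INFO lines in this stretch are ChoreService announcing ScheduledChores by name, period, and unit (BalancerChore, CatalogJanitor, ClusterStatusChore, and so on). A minimal sketch of that scheduling pattern with a hypothetical chore; the chore name, the one-second period, and the standalone main() wrapper are made up for illustration and are not how the master wires its chores internally.

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Trivial Stoppable; real chores are stopped by their hosting server.
    final boolean[] stopped = {false};
    Stoppable stopper = new Stoppable() {
      @Override public void stop(String why) { stopped[0] = true; }
      @Override public boolean isStopped() { return stopped[0]; }
    };

    ChoreService service = new ChoreService("demo");
    // Hypothetical chore; the log's real ones include BalancerChore and CatalogJanitor.
    ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1, 0, TimeUnit.SECONDS) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(chore);

    Thread.sleep(3_000);
    stopper.stop("done");
    service.shutdown();
  }
}
```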
2024-11-14T03:59:54,926 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,926 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T03:59:54,929 DEBUG [master/6a989be77d39:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T03:59:54,939 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.855sec 2024-11-14T03:59:54,940 INFO [master/6a989be77d39:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T03:59:54,940 INFO [master/6a989be77d39:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T03:59:54,940 INFO [master/6a989be77d39:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T03:59:54,940 INFO [master/6a989be77d39:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T03:59:54,940 INFO [master/6a989be77d39:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T03:59:54,940 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,42509,1731556792597-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T03:59:54,940 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,42509,1731556792597-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T03:59:54,943 DEBUG [master/6a989be77d39:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T03:59:54,943 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T03:59:54,943 INFO [master/6a989be77d39:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a989be77d39,42509,1731556792597-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
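A little further down, right after the BalancerChore is scheduled, the test turns the balancer off ("set balanceSwitch=false"). A hedged sketch of issuing the same switch from client code through Admin; the standalone main() and connection setup are assumptions, not what the test harness actually calls.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalancerSwitchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Turn the balancer off; the return value is the previous state.
      boolean previous = admin.balancerSwitch(false, /* synchronous = */ true);
      System.out.println("balancer was previously " + (previous ? "on" : "off"));
    }
  }
}
```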
2024-11-14T03:59:54,990 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5982a43f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:59:54,990 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6a989be77d39,42509,-1 for getting cluster id 2024-11-14T03:59:54,990 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T03:59:54,996 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7e260771-6820-47b3-b13f-ff0528a7a8d5' 2024-11-14T03:59:54,996 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T03:59:54,996 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7e260771-6820-47b3-b13f-ff0528a7a8d5" 2024-11-14T03:59:54,996 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@730adc2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:59:54,996 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6a989be77d39,42509,-1] 2024-11-14T03:59:54,997 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T03:59:54,997 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:54,998 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60384, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T03:59:54,999 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@219dec45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T03:59:55,000 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T03:59:55,001 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6a989be77d39,33623,1731556792851, seqNum=-1] 2024-11-14T03:59:55,001 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T03:59:55,002 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33850, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T03:59:55,005 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6a989be77d39,42509,1731556792597 2024-11-14T03:59:55,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T03:59:55,008 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T03:59:55,008 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T03:59:55,010 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/test.com,8080,1, archiveDir=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/oldWALs, maxLogs=32 2024-11-14T03:59:55,011 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731556795010 2024-11-14T03:59:55,034 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/test.com,8080,1/test.com%2C8080%2C1.1731556795010 2024-11-14T03:59:55,036 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44441:44441),(127.0.0.1/127.0.0.1:35207:35207)] 2024-11-14T03:59:55,037 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731556795037 2024-11-14T03:59:55,046 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,047 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,047 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,047 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,047 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,047 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/test.com,8080,1/test.com%2C8080%2C1.1731556795010 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/test.com,8080,1/test.com%2C8080%2C1.1731556795037 2024-11-14T03:59:55,049 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44441:44441),(127.0.0.1/127.0.0.1:35207:35207)] 2024-11-14T03:59:55,049 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/test.com,8080,1/test.com%2C8080%2C1.1731556795010 is not closed yet, will try archiving it next time 2024-11-14T03:59:55,050 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,051 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,051 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,051 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,051 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741835_1011 (size=93) 2024-11-14T03:59:55,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741835_1011 (size=93) 2024-11-14T03:59:55,057 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/WALs/test.com,8080,1/test.com%2C8080%2C1.1731556795010 to hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/oldWALs/test.com%2C8080%2C1.1731556795010 2024-11-14T03:59:55,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741836_1012 (size=93) 2024-11-14T03:59:55,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741836_1012 (size=93) 2024-11-14T03:59:55,065 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/oldWALs 2024-11-14T03:59:55,065 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731556795037) 2024-11-14T03:59:55,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T03:59:55,066 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T03:59:55,066 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:59:55,066 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:55,066 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:55,066 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T03:59:55,066 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T03:59:55,066 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=188018448, stopped=false 2024-11-14T03:59:55,066 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6a989be77d39,42509,1731556792597 2024-11-14T03:59:55,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:59:55,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:55,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T03:59:55,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:55,069 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:59:55,069 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:59:55,069 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T03:59:55,069 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
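The call stacks above run from AbstractTestLogRolling.tearDown through HBaseTestingUtil.shutdownMiniCluster, which is what kicks off the cluster shutdown logged below. A minimal sketch of that start/stop lifecycle in a JUnit 4 test; the field name, the no-argument startMiniCluster call, and the trivial assertion are assumptions, not a reproduction of the actual test class.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  // Spins up an in-process HDFS + ZooKeeper + HBase cluster for the test.
  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(); // "Minicluster is up" in the log marks this succeeding
  }

  @After
  public void tearDown() throws Exception {
    // Same call the stack trace shows: closes connections, stops RS, master, DFS.
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void clusterIsUp() throws Exception {
    // If startup succeeded, a client connection can reach the master and meta.
    try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
         Admin admin = conn.getAdmin()) {
      admin.listTableNames();
    }
  }
}
```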
2024-11-14T03:59:55,070 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:59:55,070 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:55,070 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '6a989be77d39,33623,1731556792851' ***** 2024-11-14T03:59:55,070 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T03:59:55,071 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T03:59:55,071 INFO [RS:0;6a989be77d39:33623 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T03:59:55,071 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T03:59:55,071 INFO [RS:0;6a989be77d39:33623 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T03:59:55,071 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(959): stopping server 6a989be77d39,33623,1731556792851 2024-11-14T03:59:55,071 INFO [RS:0;6a989be77d39:33623 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:59:55,071 INFO [RS:0;6a989be77d39:33623 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6a989be77d39:33623. 2024-11-14T03:59:55,071 DEBUG [RS:0;6a989be77d39:33623 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T03:59:55,071 DEBUG [RS:0;6a989be77d39:33623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:55,071 INFO [RS:0;6a989be77d39:33623 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T03:59:55,073 INFO [RS:0;6a989be77d39:33623 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T03:59:55,073 INFO [RS:0;6a989be77d39:33623 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
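The shutdown above is coordinated over ZooKeeper: the master deletes /hbase/running, the watchers fire NodeDeleted, and the region server begins stopping. The sketch below shows that watch-the-marker-znode pattern with the plain ZooKeeper client rather than HBase's own ZKWatcher; the quorum address is just the one this test happens to log, and the latch-based wait is for illustration only.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    // 127.0.0.1:60098 is the quorum string from this log; substitute your own.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60098", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        deleted.countDown(); // a region server would begin its shutdown here
      }
    });
    // Registers a one-shot watch on the cluster's "running" marker znode.
    zk.exists("/hbase/running", true);
    deleted.await();
    zk.close();
  }
}
```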
2024-11-14T03:59:55,073 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T03:59:55,074 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T03:59:55,074 DEBUG [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T03:59:55,075 DEBUG [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T03:59:55,075 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T03:59:55,075 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T03:59:55,075 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T03:59:55,075 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T03:59:55,075 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T03:59:55,076 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T03:59:55,100 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740/.tmp/ns/1c41f33a67ea4ac1a44a3a615e942e43 is 43, key is default/ns:d/1731556794917/Put/seqid=0 2024-11-14T03:59:55,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741837_1013 (size=5153) 2024-11-14T03:59:55,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741837_1013 (size=5153) 2024-11-14T03:59:55,116 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740/.tmp/ns/1c41f33a67ea4ac1a44a3a615e942e43 2024-11-14T03:59:55,126 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T03:59:55,126 INFO [regionserver/6a989be77d39:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T03:59:55,123 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740/.tmp/ns/1c41f33a67ea4ac1a44a3a615e942e43 as hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740/ns/1c41f33a67ea4ac1a44a3a615e942e43 2024-11-14T03:59:55,143 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740/ns/1c41f33a67ea4ac1a44a3a615e942e43, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T03:59:55,144 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 69ms, sequenceid=6, compaction requested=false 2024-11-14T03:59:55,144 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T03:59:55,178 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T03:59:55,186 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T03:59:55,186 INFO [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T03:59:55,186 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731556795075Running coprocessor pre-close hooks at 1731556795075Disabling compacts and flushes for region at 1731556795075Disabling writes for close at 1731556795075Obtaining lock to block concurrent updates at 1731556795076 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731556795076Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731556795076Flushing stores of hbase:meta,,1.1588230740 at 1731556795077 (+1 ms)Flushing 1588230740/ns: creating writer at 1731556795077Flushing 1588230740/ns: appending metadata at 1731556795100 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1731556795100Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66a29c1f: reopening flushed file at 1731556795123 (+23 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 69ms, sequenceid=6, compaction requested=false at 1731556795144 (+21 ms)Writing region close event to WAL at 1731556795149 (+5 ms)Running coprocessor post-close hooks at 1731556795186 (+37 ms)Closed at 1731556795186 2024-11-14T03:59:55,186 DEBUG [RS_CLOSE_META-regionserver/6a989be77d39:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T03:59:55,276 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(976): stopping server 6a989be77d39,33623,1731556792851; all regions closed. 
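
The flush above writes the ns family to a file under .tmp and then commits it into the store directory before the "Added ..." entry appears. Below is a minimal, hedged sketch of that write-to-temp-then-rename pattern using the plain Hadoop FileSystem API; the paths and payload are illustrative assumptions, not the HFile format or HBase's actual flush code.

    // Hedged sketch: write a new file under .tmp, then rename it into place so readers
    // only ever observe complete files (the "Committing ... as ..." step in the log).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRenameSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());   // default filesystem from core-site.xml
        Path tmp = new Path("/demo/store/.tmp/part-0001");     // assumption: illustrative paths
        Path dst = new Path("/demo/store/part-0001");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeBytes("flushed cells would go here\n");     // stand-in for real store file contents
        }
        if (!fs.rename(tmp, dst)) {                            // commit: readers never see the .tmp file
          throw new java.io.IOException("commit failed for " + tmp);
        }
      }
    }

The HRegionFileSystem(442) "Committing" entries in the log correspond to this kind of rename step.
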
2024-11-14T03:59:55,281 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,282 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,282 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,282 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,282 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741834_1010 (size=1152) 2024-11-14T03:59:55,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741834_1010 (size=1152) 2024-11-14T03:59:55,303 DEBUG [RS:0;6a989be77d39:33623 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/oldWALs 2024-11-14T03:59:55,304 INFO [RS:0;6a989be77d39:33623 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C33623%2C1731556792851.meta:.meta(num 1731556794738) 2024-11-14T03:59:55,304 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,304 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,304 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,306 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,306 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:55,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741833_1009 (size=93) 2024-11-14T03:59:55,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741833_1009 (size=93) 2024-11-14T03:59:55,314 DEBUG [RS:0;6a989be77d39:33623 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/oldWALs 2024-11-14T03:59:55,314 INFO [RS:0;6a989be77d39:33623 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6a989be77d39%2C33623%2C1731556792851:(num 1731556794347) 2024-11-14T03:59:55,314 DEBUG [RS:0;6a989be77d39:33623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T03:59:55,314 INFO [RS:0;6a989be77d39:33623 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:59:55,315 INFO [RS:0;6a989be77d39:33623 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:59:55,315 INFO [RS:0;6a989be77d39:33623 {}] hbase.ChoreService(370): Chore service for: regionserver/6a989be77d39:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T03:59:55,315 INFO [regionserver/6a989be77d39:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
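
The "Closed WAL: FSHLog ..." entries above show the server's meta and default WALs being closed and archived to oldWALs during shutdown; the test running here is regionserver.wal.TestLogRolling#testLogRollOnNothingWritten. As a minimal, hedged sketch (not the test's code), a client can ask a specific region server to roll its WAL as shown below; the server name string is taken from the log, and the configuration is assumed to come from an hbase-site.xml on the classpath.

    // Hedged sketch: request a WAL roll on one region server via the public Admin API.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WalRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();               // reads hbase-site.xml
        // Server name format is "host,port,startcode"; this one is copied from the log.
        ServerName rs = ServerName.valueOf("6a989be77d39,33623,1731556792851");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.rollWALWriter(rs);   // server closes its current WAL and starts a new one
        }
      }
    }
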
2024-11-14T03:59:55,315 INFO [RS:0;6a989be77d39:33623 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:59:55,315 INFO [RS:0;6a989be77d39:33623 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33623 2024-11-14T03:59:55,317 INFO [RS:0;6a989be77d39:33623 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:59:55,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T03:59:55,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6a989be77d39,33623,1731556792851 2024-11-14T03:59:55,318 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6a989be77d39,33623,1731556792851] 2024-11-14T03:59:55,320 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6a989be77d39,33623,1731556792851 already deleted, retry=false 2024-11-14T03:59:55,320 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6a989be77d39,33623,1731556792851 expired; onlineServers=0 2024-11-14T03:59:55,320 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6a989be77d39,42509,1731556792597' ***** 2024-11-14T03:59:55,320 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T03:59:55,320 INFO [M:0;6a989be77d39:42509 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T03:59:55,320 INFO [M:0;6a989be77d39:42509 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T03:59:55,320 DEBUG [M:0;6a989be77d39:42509 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T03:59:55,320 DEBUG [M:0;6a989be77d39:42509 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T03:59:55,320 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
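
The NodeDeleted event on /hbase/rs/6a989be77d39,33623,1731556792851 above is how the master's RegionServerTracker notices the region server going away: the RS znode is ephemeral, so it disappears when the server's ZooKeeper session closes. Below is a minimal sketch of watching such a deletion with the plain ZooKeeper client; the quorum address and znode path are illustrative assumptions, and HBase itself does this through ZKWatcher rather than a raw client.

    // Hedged sketch: set a one-shot watch and react when an ephemeral RS znode vanishes.
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:2181";            // assumption: local quorum for illustration
        String rsNode = "/hbase/rs/example-server";  // assumption: illustrative znode name
        ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            System.out.println("RS znode deleted: " + event.getPath());
          }
        };
        zk.exists(rsNode, watcher);  // one-shot watch; fires when the ephemeral node goes away
        Thread.sleep(60_000);        // keep the process alive long enough to receive the event
        zk.close();
      }
    }
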
2024-11-14T03:59:55,321 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556793878 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.small.0-1731556793878,5,FailOnTimeoutGroup] 2024-11-14T03:59:55,321 DEBUG [master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556793878 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a989be77d39:0:becomeActiveMaster-HFileCleaner.large.0-1731556793878,5,FailOnTimeoutGroup] 2024-11-14T03:59:55,321 INFO [M:0;6a989be77d39:42509 {}] hbase.ChoreService(370): Chore service for: master/6a989be77d39:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T03:59:55,321 INFO [M:0;6a989be77d39:42509 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T03:59:55,321 DEBUG [M:0;6a989be77d39:42509 {}] master.HMaster(1795): Stopping service threads 2024-11-14T03:59:55,321 INFO [M:0;6a989be77d39:42509 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T03:59:55,321 INFO [M:0;6a989be77d39:42509 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T03:59:55,321 INFO [M:0;6a989be77d39:42509 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T03:59:55,321 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T03:59:55,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:55,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T03:59:55,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T03:59:55,324 DEBUG [M:0;6a989be77d39:42509 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-14T03:59:55,324 DEBUG [M:0;6a989be77d39:42509 {}] master.ActiveMasterManager(353): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-14T03:59:55,325 INFO [M:0;6a989be77d39:42509 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/.lastflushedseqids 2024-11-14T03:59:55,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:55,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741838_1014 (size=99) 2024-11-14T03:59:55,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741838_1014 (size=99) 2024-11-14T03:59:55,386 INFO [M:0;6a989be77d39:42509 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T03:59:55,386 INFO [M:0;6a989be77d39:42509 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T03:59:55,387 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T03:59:55,387 INFO [M:0;6a989be77d39:42509 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:55,387 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:55,387 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T03:59:55,387 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:55,387 INFO [M:0;6a989be77d39:42509 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T03:59:55,417 DEBUG [M:0;6a989be77d39:42509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d974a3d7890647a8a59c4907842d3a98 is 82, key is hbase:meta,,1/info:regioninfo/1731556794887/Put/seqid=0 2024-11-14T03:59:55,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:59:55,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x100ef66cd450001, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:59:55,420 INFO [RS:0;6a989be77d39:33623 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:59:55,420 INFO [RS:0;6a989be77d39:33623 {}] regionserver.HRegionServer(1031): Exiting; stopping=6a989be77d39,33623,1731556792851; zookeeper connection closed. 
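
The repeated "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above appear to come from RecoverLeaseFSUtils reflectively probing isFileClosed on WAL files left over from an earlier mini-cluster (note the different port, 43619) whose DFS client has already been shut down, so the probe itself fails. A minimal, hedged sketch of the underlying recover-then-poll pattern against an open DistributedFileSystem is below; the command-line WAL path is an illustrative assumption.

    // Hedged sketch of the recover-then-poll pattern that RecoverLeaseFSUtils wraps:
    // ask the NameNode to release the previous writer's lease, then poll isFileClosed().
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Path wal = new Path(args[0]);   // assumption: an hdfs:// WAL path passed on the command line
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(wal.toUri(), new Configuration());
        boolean closed = dfs.recoverLease(wal);   // true once the lease is released and the file is closed
        while (!closed) {
          Thread.sleep(1000L);                    // back off between probes
          closed = dfs.isFileClosed(wal);         // the probe that threw "Filesystem closed" in the log,
                                                  // because there the client had already been shut down
        }
        dfs.close();
      }
    }
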
2024-11-14T03:59:55,432 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@141d5345 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@141d5345 2024-11-14T03:59:55,432 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T03:59:55,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741839_1015 (size=5672) 2024-11-14T03:59:55,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741839_1015 (size=5672) 2024-11-14T03:59:55,453 INFO [M:0;6a989be77d39:42509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d974a3d7890647a8a59c4907842d3a98 2024-11-14T03:59:55,484 DEBUG [M:0;6a989be77d39:42509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a3240f48ef56452e8ccc8bec965c0eec is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731556794922/Put/seqid=0 2024-11-14T03:59:55,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741840_1016 (size=5275) 2024-11-14T03:59:55,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741840_1016 (size=5275) 2024-11-14T03:59:55,504 INFO [M:0;6a989be77d39:42509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a3240f48ef56452e8ccc8bec965c0eec 2024-11-14T03:59:55,546 DEBUG [M:0;6a989be77d39:42509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9a604d703974141b2e75e42eb93ca30 is 69, key is 6a989be77d39,33623,1731556792851/rs:state/1731556793971/Put/seqid=0 2024-11-14T03:59:55,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741841_1017 (size=5156) 2024-11-14T03:59:55,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741841_1017 (size=5156) 2024-11-14T03:59:55,556 INFO [M:0;6a989be77d39:42509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9a604d703974141b2e75e42eb93ca30 2024-11-14T03:59:55,589 DEBUG [M:0;6a989be77d39:42509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b88f2bea0a0c4b7e89420dd4f3a43355 is 52, 
key is load_balancer_on/state:d/1731556795007/Put/seqid=0 2024-11-14T03:59:55,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741842_1018 (size=5056) 2024-11-14T03:59:55,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741842_1018 (size=5056) 2024-11-14T03:59:56,020 INFO [M:0;6a989be77d39:42509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b88f2bea0a0c4b7e89420dd4f3a43355 2024-11-14T03:59:56,037 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d974a3d7890647a8a59c4907842d3a98 as hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d974a3d7890647a8a59c4907842d3a98 2024-11-14T03:59:56,048 INFO [M:0;6a989be77d39:42509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d974a3d7890647a8a59c4907842d3a98, entries=8, sequenceid=29, filesize=5.5 K 2024-11-14T03:59:56,050 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a3240f48ef56452e8ccc8bec965c0eec as hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a3240f48ef56452e8ccc8bec965c0eec 2024-11-14T03:59:56,076 INFO [M:0;6a989be77d39:42509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a3240f48ef56452e8ccc8bec965c0eec, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T03:59:56,080 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9a604d703974141b2e75e42eb93ca30 as hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c9a604d703974141b2e75e42eb93ca30 2024-11-14T03:59:56,096 INFO [M:0;6a989be77d39:42509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c9a604d703974141b2e75e42eb93ca30, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T03:59:56,098 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b88f2bea0a0c4b7e89420dd4f3a43355 as 
hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b88f2bea0a0c4b7e89420dd4f3a43355 2024-11-14T03:59:56,112 INFO [M:0;6a989be77d39:42509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40587/user/jenkins/test-data/cca57cc3-84a7-8983-8e4e-6517f3f2cfb4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b88f2bea0a0c4b7e89420dd4f3a43355, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T03:59:56,118 INFO [M:0;6a989be77d39:42509 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 730ms, sequenceid=29, compaction requested=false 2024-11-14T03:59:56,133 INFO [regionserver/6a989be77d39:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T03:59:56,157 INFO [M:0;6a989be77d39:42509 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T03:59:56,157 DEBUG [M:0;6a989be77d39:42509 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731556795387Disabling compacts and flushes for region at 1731556795387Disabling writes for close at 1731556795387Obtaining lock to block concurrent updates at 1731556795387Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731556795387Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731556795387Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731556795389 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731556795389Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731556795416 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731556795416Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731556795460 (+44 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731556795484 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731556795484Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731556795512 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731556795545 (+33 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731556795545Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731556795566 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731556795588 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731556795588Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a6f9796: reopening flushed file at 1731556796033 (+445 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cc0f9d2: reopening flushed file at 1731556796049 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@155ff064: reopening flushed file at 1731556796076 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56703771: reopening flushed file at 1731556796096 (+20 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 730ms, sequenceid=29, compaction requested=false at 1731556796118 (+22 ms)Writing region 
close event to WAL at 1731556796157 (+39 ms)Closed at 1731556796157 2024-11-14T03:59:56,169 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:56,170 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:56,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:56,173 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:56,173 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T03:59:56,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44059 is added to blk_1073741830_1006 (size=10311) 2024-11-14T03:59:56,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741830_1006 (size=10311) 2024-11-14T03:59:56,189 INFO [M:0;6a989be77d39:42509 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T03:59:56,189 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T03:59:56,189 INFO [M:0;6a989be77d39:42509 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42509 2024-11-14T03:59:56,189 INFO [M:0;6a989be77d39:42509 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T03:59:56,301 INFO [M:0;6a989be77d39:42509 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T03:59:56,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:59:56,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42509-0x100ef66cd450000, quorum=127.0.0.1:60098, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T03:59:56,324 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9268180{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:59:56,325 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@372f7d77{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:59:56,325 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:59:56,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10e33487{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:59:56,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@349fe29e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/hadoop.log.dir/,STOPPED} 2024-11-14T03:59:56,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,46431,1731556551881/6a989be77d39%2C46431%2C1731556551881.meta.1731556552817.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T03:59:56,331 WARN [BP-52747132-172.17.0.2-1731556790168 heartbeating to localhost/127.0.0.1:40587 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:59:56,331 WARN [BP-52747132-172.17.0.2-1731556790168 heartbeating to localhost/127.0.0.1:40587 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-52747132-172.17.0.2-1731556790168 (Datanode Uuid b922e687-ca70-43ea-a736-9905644e2286) service to localhost/127.0.0.1:40587 2024-11-14T03:59:56,333 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:59:56,333 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/data/data4/current/BP-52747132-172.17.0.2-1731556790168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:59:56,333 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/data/data3/current/BP-52747132-172.17.0.2-1731556790168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:59:56,337 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:59:56,337 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:59:56,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43619/user/jenkins/test-data/62179ab1-5d1d-6047-9594-648a3280df65/WALs/6a989be77d39,36173,1731556553047/6a989be77d39%2C36173%2C1731556553047.1731556553260 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T03:59:56,375 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b01e479{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T03:59:56,377 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@483384c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:59:56,378 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:59:56,378 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186dc28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:59:56,378 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444363ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/hadoop.log.dir/,STOPPED} 2024-11-14T03:59:56,407 WARN [BP-52747132-172.17.0.2-1731556790168 heartbeating to localhost/127.0.0.1:40587 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T03:59:56,407 WARN [BP-52747132-172.17.0.2-1731556790168 heartbeating to localhost/127.0.0.1:40587 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-52747132-172.17.0.2-1731556790168 (Datanode Uuid 56cb16dc-553f-41e4-8ed8-2f79c089d510) service to localhost/127.0.0.1:40587 2024-11-14T03:59:56,408 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/data/data1/current/BP-52747132-172.17.0.2-1731556790168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:59:56,408 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T03:59:56,408 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T03:59:56,409 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T03:59:56,409 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/cluster_a5242c2c-f6cc-3122-fb09-236585e6b550/data/data2/current/BP-52747132-172.17.0.2-1731556790168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T03:59:56,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1802ed83{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T03:59:56,445 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60c97719{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T03:59:56,445 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T03:59:56,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34f5471c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T03:59:56,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34b79769{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a0a93b27-6394-9149-abd9-ec7a20b5541c/hadoop.log.dir/,STOPPED} 2024-11-14T03:59:56,461 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T03:59:56,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T03:59:56,510 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 228) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40587 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:40587 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40587 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40587 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40587 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40587 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40587 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40587 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=532 (was 506) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=681 (was 601) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4905 (was 5049)