2024-12-04 21:47:59,853 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-04 21:47:59,871 main DEBUG Took 0.012979 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-04 21:47:59,872 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-04 21:47:59,872 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-04 21:47:59,875 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-04 21:47:59,877 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,885 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-04 21:47:59,900 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,902 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,902 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,903 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,903 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,904 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,905 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,905 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,906 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,906 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,907 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,908 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,908 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,909 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-04 21:47:59,909 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,910 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,911 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,911 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,912 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,912 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,912 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,913 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,913 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,914 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-04 21:47:59,914 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,915 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-04 21:47:59,916 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-04 21:47:59,918 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-04 21:47:59,920 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-04 21:47:59,921 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-04 21:47:59,930 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-04 21:47:59,931 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-04 21:47:59,941 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-04 21:47:59,944 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-04 21:47:59,946 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-04 21:47:59,946 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-04 21:47:59,947 main DEBUG createAppenders(={Console}) 2024-12-04 21:47:59,948 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-04 21:47:59,948 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-04 21:47:59,948 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-04 21:47:59,949 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-04 21:47:59,949 main DEBUG OutputStream closed 2024-12-04 21:47:59,950 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-04 21:47:59,950 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-04 21:47:59,950 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-04 21:48:00,040 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-04 21:48:00,042 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-04 21:48:00,044 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-04 21:48:00,046 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-04 21:48:00,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-04 21:48:00,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-04 21:48:00,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-04 21:48:00,048 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-04 21:48:00,048 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-04 21:48:00,049 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-04 21:48:00,049 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-04 21:48:00,049 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-04 21:48:00,050 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-04 21:48:00,050 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-04 21:48:00,050 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-04 21:48:00,051 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-04 21:48:00,051 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-04 21:48:00,052 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-04 21:48:00,054 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-04 21:48:00,055 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-04 21:48:00,055 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-04 21:48:00,058 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-04T21:48:00,421 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5 2024-12-04 21:48:00,424 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-04 21:48:00,425 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-04T21:48:00,439 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-04T21:48:00,514 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=510, ProcessCount=11, AvailableMemoryMB=4222 2024-12-04T21:48:00,522 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T21:48:00,545 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c, deleteOnExit=true 2024-12-04T21:48:00,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T21:48:00,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/test.cache.data in system properties and HBase conf 2024-12-04T21:48:00,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T21:48:00,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.log.dir in system properties and HBase conf 2024-12-04T21:48:00,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T21:48:00,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T21:48:00,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T21:48:00,661 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-04T21:48:00,791 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T21:48:00,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:48:00,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:48:00,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T21:48:00,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:48:00,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T21:48:00,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T21:48:00,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:48:00,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:48:00,801 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T21:48:00,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/nfs.dump.dir in system properties and HBase conf 2024-12-04T21:48:00,802 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/java.io.tmpdir in system properties and HBase conf 2024-12-04T21:48:00,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:48:00,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T21:48:00,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T21:48:01,347 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:48:01,620 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-04T21:48:01,702 INFO [Time-limited test {}] log.Log(170): Logging initialized @2631ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-04T21:48:01,790 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:48:01,852 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:48:01,873 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:48:01,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:48:01,875 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:48:01,889 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:48:01,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:48:01,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:48:02,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/java.io.tmpdir/jetty-localhost-42919-hadoop-hdfs-3_4_1-tests_jar-_-any-1226852473497460670/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:48:02,103 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:42919} 2024-12-04T21:48:02,103 INFO [Time-limited test {}] server.Server(415): Started @3033ms 2024-12-04T21:48:02,137 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:48:02,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:48:02,503 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:48:02,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:48:02,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:48:02,505 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:48:02,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:48:02,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:48:02,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59e63bea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/java.io.tmpdir/jetty-localhost-34103-hadoop-hdfs-3_4_1-tests_jar-_-any-13630235817985669919/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:48:02,629 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:34103} 2024-12-04T21:48:02,629 INFO [Time-limited test {}] server.Server(415): Started @3559ms 2024-12-04T21:48:02,694 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:48:02,846 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:48:02,853 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:48:02,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:48:02,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:48:02,856 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:48:02,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:48:02,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:48:02,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55d18735{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/java.io.tmpdir/jetty-localhost-42791-hadoop-hdfs-3_4_1-tests_jar-_-any-12953962424543736687/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:48:02,991 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:42791} 2024-12-04T21:48:02,992 INFO [Time-limited test {}] server.Server(415): Started @3921ms 2024-12-04T21:48:02,995 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T21:48:03,162 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/data/data1/current/BP-1720806833-172.17.0.2-1733348881426/current, will proceed with Du for space computation calculation, 2024-12-04T21:48:03,163 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/data/data2/current/BP-1720806833-172.17.0.2-1733348881426/current, will proceed with Du for space computation calculation, 2024-12-04T21:48:03,162 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/data/data3/current/BP-1720806833-172.17.0.2-1733348881426/current, will proceed with Du for space computation calculation, 2024-12-04T21:48:03,163 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/data/data4/current/BP-1720806833-172.17.0.2-1733348881426/current, will proceed with Du for space computation calculation, 2024-12-04T21:48:03,238 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:48:03,239 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:48:03,307 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a7934543ea5d943 with lease ID 0x5ac9e3dbd5435abb: Processing first storage report for DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad from datanode DatanodeRegistration(127.0.0.1:44691, datanodeUuid=94c3cc5b-9a88-4370-a92f-eef8d8f124e4, infoPort=42155, infoSecurePort=0, ipcPort=38533, storageInfo=lv=-57;cid=testClusterID;nsid=2009483138;c=1733348881426) 2024-12-04T21:48:03,309 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a7934543ea5d943 with lease ID 0x5ac9e3dbd5435abb: from storage DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad node DatanodeRegistration(127.0.0.1:44691, datanodeUuid=94c3cc5b-9a88-4370-a92f-eef8d8f124e4, infoPort=42155, infoSecurePort=0, ipcPort=38533, storageInfo=lv=-57;cid=testClusterID;nsid=2009483138;c=1733348881426), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T21:48:03,309 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7be4e7bcca4482dc with lease ID 0x5ac9e3dbd5435abc: Processing first storage report for DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025 from datanode DatanodeRegistration(127.0.0.1:38409, datanodeUuid=df2a8dda-e6c2-4c6c-bc03-7b7c5b284ee9, infoPort=35345, infoSecurePort=0, ipcPort=37499, storageInfo=lv=-57;cid=testClusterID;nsid=2009483138;c=1733348881426) 2024-12-04T21:48:03,310 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7be4e7bcca4482dc with lease ID 0x5ac9e3dbd5435abc: from storage DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025 node DatanodeRegistration(127.0.0.1:38409, datanodeUuid=df2a8dda-e6c2-4c6c-bc03-7b7c5b284ee9, infoPort=35345, infoSecurePort=0, ipcPort=37499, storageInfo=lv=-57;cid=testClusterID;nsid=2009483138;c=1733348881426), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:48:03,310 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a7934543ea5d943 with lease ID 0x5ac9e3dbd5435abb: Processing first storage report for DS-9e4e8e0c-9704-4b5c-abb3-8a15ada0bd2b from datanode DatanodeRegistration(127.0.0.1:44691, datanodeUuid=94c3cc5b-9a88-4370-a92f-eef8d8f124e4, infoPort=42155, infoSecurePort=0, ipcPort=38533, storageInfo=lv=-57;cid=testClusterID;nsid=2009483138;c=1733348881426) 2024-12-04T21:48:03,310 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a7934543ea5d943 with lease ID 0x5ac9e3dbd5435abb: from storage DS-9e4e8e0c-9704-4b5c-abb3-8a15ada0bd2b node DatanodeRegistration(127.0.0.1:44691, datanodeUuid=94c3cc5b-9a88-4370-a92f-eef8d8f124e4, infoPort=42155, infoSecurePort=0, ipcPort=38533, storageInfo=lv=-57;cid=testClusterID;nsid=2009483138;c=1733348881426), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:48:03,310 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7be4e7bcca4482dc with lease ID 0x5ac9e3dbd5435abc: Processing first storage report for DS-b6085f49-d6fe-4fb8-8556-3c3b0380550c from datanode DatanodeRegistration(127.0.0.1:38409, datanodeUuid=df2a8dda-e6c2-4c6c-bc03-7b7c5b284ee9, infoPort=35345, infoSecurePort=0, ipcPort=37499, storageInfo=lv=-57;cid=testClusterID;nsid=2009483138;c=1733348881426) 2024-12-04T21:48:03,311 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x7be4e7bcca4482dc with lease ID 0x5ac9e3dbd5435abc: from storage DS-b6085f49-d6fe-4fb8-8556-3c3b0380550c node DatanodeRegistration(127.0.0.1:38409, datanodeUuid=df2a8dda-e6c2-4c6c-bc03-7b7c5b284ee9, infoPort=35345, infoSecurePort=0, ipcPort=37499, storageInfo=lv=-57;cid=testClusterID;nsid=2009483138;c=1733348881426), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:48:03,434 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5 2024-12-04T21:48:03,525 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/zookeeper_0, clientPort=61970, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T21:48:03,537 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61970 2024-12-04T21:48:03,551 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:48:03,557 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:48:03,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:48:03,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:48:04,238 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be with version=8 2024-12-04T21:48:04,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/hbase-staging 2024-12-04T21:48:04,347 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-04T21:48:04,598 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:48:04,610 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:48:04,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:48:04,615 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:48:04,615 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:48:04,615 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:48:04,763 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T21:48:04,837 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-04T21:48:04,846 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-04T21:48:04,851 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:48:04,885 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 83850 (auto-detected) 2024-12-04T21:48:04,886 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-04T21:48:04,913 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38031 2024-12-04T21:48:04,940 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38031 connecting to ZooKeeper ensemble=127.0.0.1:61970 2024-12-04T21:48:04,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:380310x0, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:48:04,975 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38031-0x100a73458a10000 connected 2024-12-04T21:48:05,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:48:05,009 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:48:05,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:48:05,030 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be, hbase.cluster.distributed=false 2024-12-04T21:48:05,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:48:05,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38031 
2024-12-04T21:48:05,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38031 2024-12-04T21:48:05,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38031 2024-12-04T21:48:05,087 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38031 2024-12-04T21:48:05,087 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38031 2024-12-04T21:48:05,218 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:48:05,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:48:05,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:48:05,220 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:48:05,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:48:05,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:48:05,224 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T21:48:05,226 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:48:05,227 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40855 2024-12-04T21:48:05,230 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40855 connecting to ZooKeeper ensemble=127.0.0.1:61970 2024-12-04T21:48:05,232 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:48:05,239 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:48:05,253 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:408550x0, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:48:05,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:408550x0, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:48:05,261 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache 
size=880 MB, blockSize=64 KB 2024-12-04T21:48:05,263 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40855-0x100a73458a10001 connected 2024-12-04T21:48:05,288 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T21:48:05,292 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T21:48:05,299 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:48:05,301 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40855 2024-12-04T21:48:05,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40855 2024-12-04T21:48:05,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40855 2024-12-04T21:48:05,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40855 2024-12-04T21:48:05,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40855 2024-12-04T21:48:05,340 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bb3046a53f79:38031 2024-12-04T21:48:05,341 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bb3046a53f79,38031,1733348884408 2024-12-04T21:48:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:48:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:48:05,352 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bb3046a53f79,38031,1733348884408 2024-12-04T21:48:05,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T21:48:05,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:05,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:05,383 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] 
zookeeper.ZKUtil(111): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T21:48:05,384 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bb3046a53f79,38031,1733348884408 from backup master directory 2024-12-04T21:48:05,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:48:05,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bb3046a53f79,38031,1733348884408 2024-12-04T21:48:05,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:48:05,389 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T21:48:05,389 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bb3046a53f79,38031,1733348884408 2024-12-04T21:48:05,391 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-04T21:48:05,397 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-04T21:48:05,465 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/hbase.id] with ID: b15ef73b-e015-4ade-b098-7e802c7c1d2f 2024-12-04T21:48:05,465 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/.tmp/hbase.id 2024-12-04T21:48:05,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:48:05,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:48:05,486 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/.tmp/hbase.id]:[hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/hbase.id] 2024-12-04T21:48:05,543 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:48:05,549 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 
2024-12-04T21:48:05,568 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-12-04T21:48:05,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:05,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:05,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:48:05,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:48:05,613 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:48:05,615 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T21:48:05,624 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:48:05,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:48:05,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:48:05,704 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store 2024-12-04T21:48:05,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:48:05,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:48:05,737 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-04T21:48:05,741 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:48:05,742 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:48:05,743 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:48:05,743 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:48:05,745 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:48:05,745 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:48:05,745 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T21:48:05,747 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733348885742Disabling compacts and flushes for region at 1733348885742Disabling writes for close at 1733348885745 (+3 ms)Writing region close event to WAL at 1733348885745Closed at 1733348885745 2024-12-04T21:48:05,750 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/.initializing 2024-12-04T21:48:05,750 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/WALs/bb3046a53f79,38031,1733348884408 2024-12-04T21:48:05,776 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C38031%2C1733348884408, suffix=, logDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/WALs/bb3046a53f79,38031,1733348884408, archiveDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/oldWALs, maxLogs=10 2024-12-04T21:48:05,788 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C38031%2C1733348884408.1733348885782 2024-12-04T21:48:05,810 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/WALs/bb3046a53f79,38031,1733348884408/bb3046a53f79%2C38031%2C1733348884408.1733348885782 2024-12-04T21:48:05,824 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35345:35345),(127.0.0.1/127.0.0.1:42155:42155)] 2024-12-04T21:48:05,829 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:48:05,830 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:48:05,834 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,835 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,880 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T21:48:05,920 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:05,923 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:48:05,924 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,928 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T21:48:05,928 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:05,929 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:48:05,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,933 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T21:48:05,933 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:05,935 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:48:05,935 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,938 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T21:48:05,938 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:05,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:48:05,940 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,945 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,947 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,954 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,954 DEBUG [master/bb3046a53f79:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,958 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T21:48:05,962 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:48:05,969 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:48:05,972 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833045, jitterRate=0.05927233397960663}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T21:48:05,979 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733348885851Initializing all the Stores at 1733348885854 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348885854Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348885857 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348885857Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348885857Cleaning up temporary data from old regions at 1733348885954 (+97 ms)Region opened successfully at 1733348885979 (+25 ms) 2024-12-04T21:48:05,980 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T21:48:06,024 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fdbcc1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:48:06,064 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T21:48:06,076 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T21:48:06,076 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T21:48:06,080 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T21:48:06,081 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-04T21:48:06,087 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-04T21:48:06,087 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T21:48:06,115 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T21:48:06,125 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T21:48:06,128 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T21:48:06,131 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T21:48:06,133 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T21:48:06,134 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T21:48:06,136 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T21:48:06,140 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T21:48:06,141 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T21:48:06,143 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T21:48:06,144 DEBUG 
[master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T21:48:06,160 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T21:48:06,162 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T21:48:06,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:48:06,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:48:06,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:06,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:06,171 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bb3046a53f79,38031,1733348884408, sessionid=0x100a73458a10000, setting cluster-up flag (Was=false) 2024-12-04T21:48:06,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:06,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:06,189 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T21:48:06,190 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,38031,1733348884408 2024-12-04T21:48:06,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:06,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:06,198 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T21:48:06,200 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,38031,1733348884408 2024-12-04T21:48:06,205 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T21:48:06,223 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(746): ClusterId : b15ef73b-e015-4ade-b098-7e802c7c1d2f 2024-12-04T21:48:06,226 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T21:48:06,231 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T21:48:06,231 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T21:48:06,234 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T21:48:06,235 DEBUG [RS:0;bb3046a53f79:40855 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@740de624, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:48:06,258 DEBUG [RS:0;bb3046a53f79:40855 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bb3046a53f79:40855 2024-12-04T21:48:06,262 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T21:48:06,262 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T21:48:06,263 DEBUG [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T21:48:06,265 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(2659): reportForDuty to master=bb3046a53f79,38031,1733348884408 with port=40855, startcode=1733348885169 2024-12-04T21:48:06,278 DEBUG [RS:0;bb3046a53f79:40855 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T21:48:06,289 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T21:48:06,301 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T21:48:06,309 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-04T21:48:06,316 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bb3046a53f79,38031,1733348884408 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T21:48:06,324 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:48:06,325 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:48:06,325 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:48:06,325 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:48:06,326 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bb3046a53f79:0, corePoolSize=10, maxPoolSize=10 2024-12-04T21:48:06,326 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,326 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:48:06,326 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,345 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:48:06,346 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T21:48:06,353 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733348916353 2024-12-04T21:48:06,355 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:06,355 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T21:48:06,356 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T21:48:06,357 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T21:48:06,362 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T21:48:06,363 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T21:48:06,363 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T21:48:06,363 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T21:48:06,366 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
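Aside on the table descriptors dumped above for master:store and hbase:meta: each column family entry lists attributes such as VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE. A rough sketch of how the 'info' family from those dumps maps onto the HBase client descriptor builders; the table name example:sketch is a placeholder, since in the log these descriptors are built internally by the master:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilyDescriptorSketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes printed in the log:
            // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true,
            // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBlocksize(8192)
                    .build();

            // Placeholder table name; master:store and hbase:meta are system
            // tables whose descriptors are created by the master itself.
            TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("example:sketch"))
                    .setColumnFamily(info)
                    .build();

            System.out.println(td);
        }
    }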
2024-12-04T21:48:06,375 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T21:48:06,376 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36175, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T21:48:06,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:48:06,377 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T21:48:06,378 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T21:48:06,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:48:06,380 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T21:48:06,381 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be 2024-12-04T21:48:06,384 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T21:48:06,384 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T21:48:06,389 DEBUG 
[master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733348886386,5,FailOnTimeoutGroup] 2024-12-04T21:48:06,385 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38031 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-04T21:48:06,393 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733348886389,5,FailOnTimeoutGroup] 2024-12-04T21:48:06,394 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,394 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T21:48:06,396 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,396 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-04T21:48:06,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:48:06,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:48:06,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:48:06,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:48:06,407 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:48:06,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:06,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:48:06,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:48:06,411 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:48:06,411 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:06,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:48:06,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:48:06,415 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:48:06,416 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:06,417 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:48:06,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:48:06,421 DEBUG [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-04T21:48:06,422 WARN [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
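Aside on the registration handshake visible above: the region server's reportForDuty is rejected with ServerNotRunningYetException while the master is still initializing, the server sleeps 100 ms and retries, and the next attempt (21:48:06,523 below) is accepted. A purely illustrative sketch of that retry loop; none of these names are the real HBase internals:

    public class ReportForDutyRetrySketch {
        // Hypothetical stand-in for the exception type seen in the log.
        static class ServerNotRunningYetException extends Exception {}

        // Hypothetical stand-in for the registration RPC to the master;
        // in this sketch it simply succeeds on the first call.
        static void reportForDuty() throws ServerNotRunningYetException {
        }

        public static void main(String[] args) throws InterruptedException {
            boolean registered = false;
            while (!registered) {
                try {
                    reportForDuty();
                    registered = true;      // master accepted the registration
                } catch (ServerNotRunningYetException e) {
                    Thread.sleep(100L);     // matches "sleeping 100 ms and then retrying"
                }
            }
            System.out.println("registered with master");
        }
    }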
2024-12-04T21:48:06,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:48:06,427 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:06,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:48:06,428 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:48:06,430 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740 2024-12-04T21:48:06,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740 2024-12-04T21:48:06,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:48:06,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:48:06,437 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
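Aside on the two FlushLargeStoresPolicy entries (32.0 M for master:store earlier, 16.0 M for hbase:meta just above): when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, the per-family lower bound is the region's memstore flush size divided by its number of column families. A quick check of that arithmetic using only values printed in this log:

    public class FlushLowerBoundArithmetic {
        public static void main(String[] args) {
            // master:store: flushSize=134217728 (128 MB) across 4 families
            // (info, proc, rs, state) => 33554432, matching
            // FlushLargeStoresPolicy{flushSizeLowerBound=33554432}.
            long masterStoreFlushSize = 134_217_728L;
            System.out.println(masterStoreFlushSize / 4);   // 33554432 (32 MB)

            // hbase:meta: the log reports 16.0 M per family over 4 families
            // (info, ns, rep_barrier, table), i.e. flushSizeLowerBound=16777216,
            // which implies a 64 MB memstore flush size for the meta region.
            long metaFlushLowerBound = 16_777_216L;
            System.out.println(metaFlushLowerBound * 4);    // 67108864 (64 MB)
        }
    }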
2024-12-04T21:48:06,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:48:06,446 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:48:06,448 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713042, jitterRate=-0.09332036972045898}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:48:06,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733348886403Initializing all the Stores at 1733348886404 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348886404Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348886405 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348886405Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348886405Cleaning up temporary data from old regions at 1733348886435 (+30 ms)Region opened successfully at 1733348886451 (+16 ms) 2024-12-04T21:48:06,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:48:06,452 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:48:06,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:48:06,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:48:06,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:48:06,453 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:48:06,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733348886452Disabling compacts and flushes for region at 1733348886452Disabling writes for close at 1733348886452Writing 
region close event to WAL at 1733348886453 (+1 ms)Closed at 1733348886453 2024-12-04T21:48:06,456 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:48:06,456 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T21:48:06,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T21:48:06,472 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:48:06,480 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T21:48:06,523 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(2659): reportForDuty to master=bb3046a53f79,38031,1733348884408 with port=40855, startcode=1733348885169 2024-12-04T21:48:06,526 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38031 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bb3046a53f79,40855,1733348885169 2024-12-04T21:48:06,528 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38031 {}] master.ServerManager(517): Registering regionserver=bb3046a53f79,40855,1733348885169 2024-12-04T21:48:06,535 DEBUG [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be 2024-12-04T21:48:06,536 DEBUG [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44629 2024-12-04T21:48:06,536 DEBUG [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T21:48:06,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:48:06,541 DEBUG [RS:0;bb3046a53f79:40855 {}] zookeeper.ZKUtil(111): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bb3046a53f79,40855,1733348885169 2024-12-04T21:48:06,541 WARN [RS:0;bb3046a53f79:40855 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T21:48:06,541 INFO [RS:0;bb3046a53f79:40855 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:48:06,541 DEBUG [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169 2024-12-04T21:48:06,545 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bb3046a53f79,40855,1733348885169] 2024-12-04T21:48:06,568 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T21:48:06,580 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T21:48:06,587 INFO [RS:0;bb3046a53f79:40855 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T21:48:06,587 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,590 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T21:48:06,595 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T21:48:06,596 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,597 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,597 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,597 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,598 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,598 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,598 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:48:06,598 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,598 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,598 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bb3046a53f79:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T21:48:06,598 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,599 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,599 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:48:06,599 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:48:06,599 DEBUG [RS:0;bb3046a53f79:40855 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:48:06,601 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,601 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,601 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,602 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,602 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,602 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,40855,1733348885169-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:48:06,631 WARN [bb3046a53f79:38031 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T21:48:06,632 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T21:48:06,634 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,40855,1733348885169-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,634 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:06,635 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.Replication(171): bb3046a53f79,40855,1733348885169 started 2024-12-04T21:48:06,653 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:48:06,653 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(1482): Serving as bb3046a53f79,40855,1733348885169, RpcServer on bb3046a53f79/172.17.0.2:40855, sessionid=0x100a73458a10001 2024-12-04T21:48:06,655 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T21:48:06,655 DEBUG [RS:0;bb3046a53f79:40855 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bb3046a53f79,40855,1733348885169 2024-12-04T21:48:06,655 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,40855,1733348885169' 2024-12-04T21:48:06,656 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T21:48:06,657 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T21:48:06,658 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T21:48:06,658 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T21:48:06,659 DEBUG [RS:0;bb3046a53f79:40855 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bb3046a53f79,40855,1733348885169 2024-12-04T21:48:06,659 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,40855,1733348885169' 2024-12-04T21:48:06,659 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T21:48:06,660 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T21:48:06,660 DEBUG [RS:0;bb3046a53f79:40855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T21:48:06,660 INFO [RS:0;bb3046a53f79:40855 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T21:48:06,661 INFO [RS:0;bb3046a53f79:40855 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-04T21:48:06,770 INFO [RS:0;bb3046a53f79:40855 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C40855%2C1733348885169, suffix=, logDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169, archiveDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/oldWALs, maxLogs=32 2024-12-04T21:48:06,773 INFO [RS:0;bb3046a53f79:40855 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C40855%2C1733348885169.1733348886773 2024-12-04T21:48:06,795 INFO [RS:0;bb3046a53f79:40855 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348886773 2024-12-04T21:48:06,798 DEBUG [RS:0;bb3046a53f79:40855 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35345:35345),(127.0.0.1/127.0.0.1:42155:42155)] 2024-12-04T21:48:06,885 DEBUG [bb3046a53f79:38031 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T21:48:06,896 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bb3046a53f79,40855,1733348885169 2024-12-04T21:48:06,903 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,40855,1733348885169, state=OPENING 2024-12-04T21:48:06,908 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T21:48:06,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:06,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:48:06,910 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:48:06,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:48:06,912 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:48:06,914 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,40855,1733348885169}] 2024-12-04T21:48:07,094 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T21:48:07,098 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50965, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T21:48:07,110 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T21:48:07,111 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:48:07,116 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C40855%2C1733348885169.meta, suffix=.meta, logDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169, archiveDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/oldWALs, maxLogs=32 2024-12-04T21:48:07,119 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C40855%2C1733348885169.meta.1733348887118.meta 2024-12-04T21:48:07,141 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.meta.1733348887118.meta 2024-12-04T21:48:07,145 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42155:42155),(127.0.0.1/127.0.0.1:35345:35345)] 2024-12-04T21:48:07,150 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:48:07,153 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T21:48:07,159 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T21:48:07,165 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-04T21:48:07,171 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T21:48:07,172 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:48:07,173 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T21:48:07,173 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T21:48:07,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:48:07,186 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:48:07,187 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:07,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:48:07,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:48:07,191 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:48:07,191 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:07,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:48:07,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:48:07,195 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:48:07,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:07,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:48:07,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:48:07,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:48:07,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:07,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-04T21:48:07,200 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:48:07,203 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740 2024-12-04T21:48:07,207 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740 2024-12-04T21:48:07,211 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:48:07,211 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:48:07,212 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T21:48:07,216 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:48:07,219 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858589, jitterRate=0.091753289103508}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:48:07,219 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T21:48:07,221 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733348887174Writing region info on filesystem at 1733348887174Initializing all the Stores at 1733348887176 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348887176Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348887182 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348887182Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348887182Cleaning up temporary data from old regions at 1733348887211 (+29 ms)Running coprocessor post-open hooks at 1733348887219 (+8 ms)Region opened successfully at 1733348887221 (+2 ms) 2024-12-04T21:48:07,233 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733348887083 2024-12-04T21:48:07,248 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T21:48:07,249 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T21:48:07,251 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,40855,1733348885169 2024-12-04T21:48:07,254 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,40855,1733348885169, state=OPEN 2024-12-04T21:48:07,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:48:07,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:48:07,258 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:48:07,258 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bb3046a53f79,40855,1733348885169 2024-12-04T21:48:07,258 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:48:07,270 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T21:48:07,270 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,40855,1733348885169 in 345 msec 2024-12-04T21:48:07,279 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T21:48:07,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 810 msec 2024-12-04T21:48:07,282 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:48:07,282 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T21:48:07,307 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:48:07,308 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,40855,1733348885169, seqNum=-1] 2024-12-04T21:48:07,337 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:48:07,339 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37559, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:48:07,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1290 sec 2024-12-04T21:48:07,372 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733348887372, completionTime=-1 2024-12-04T21:48:07,375 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T21:48:07,375 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T21:48:07,404 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T21:48:07,404 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733348947404 2024-12-04T21:48:07,404 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733349007404 2024-12-04T21:48:07,404 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-12-04T21:48:07,408 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38031,1733348884408-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:07,409 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38031,1733348884408-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:07,409 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38031,1733348884408-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:07,410 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bb3046a53f79:38031, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:48:07,411 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:07,411 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T21:48:07,417 DEBUG [master/bb3046a53f79:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T21:48:07,442 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.053sec 2024-12-04T21:48:07,444 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T21:48:07,445 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T21:48:07,446 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T21:48:07,446 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T21:48:07,446 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T21:48:07,447 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38031,1733348884408-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:48:07,447 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38031,1733348884408-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T21:48:07,459 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T21:48:07,460 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T21:48:07,460 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38031,1733348884408-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:48:07,536 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49fc76c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:48:07,538 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-04T21:48:07,539 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-04T21:48:07,543 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bb3046a53f79,38031,-1 for getting cluster id 2024-12-04T21:48:07,547 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T21:48:07,558 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b15ef73b-e015-4ade-b098-7e802c7c1d2f' 2024-12-04T21:48:07,561 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T21:48:07,562 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b15ef73b-e015-4ade-b098-7e802c7c1d2f" 2024-12-04T21:48:07,562 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cebe6ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:48:07,562 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bb3046a53f79,38031,-1] 2024-12-04T21:48:07,566 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T21:48:07,571 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:48:07,573 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T21:48:07,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7004b612, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:48:07,578 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:48:07,588 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,40855,1733348885169, seqNum=-1] 2024-12-04T21:48:07,589 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:48:07,596 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49264, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:48:07,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=bb3046a53f79,38031,1733348884408 2024-12-04T21:48:07,623 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:48:07,630 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T21:48:07,633 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T21:48:07,639 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is bb3046a53f79,38031,1733348884408 2024-12-04T21:48:07,642 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5f1dc041 2024-12-04T21:48:07,644 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T21:48:07,647 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40546, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T21:48:07,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38031 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T21:48:07,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38031 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
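The two TableDescriptorChecker WARNs above are expected in this run: the test shrinks hbase.hregion.max.filesize to 786432 bytes and hbase.hregion.memstore.flush.size to 8192 bytes so that flushes and WAL rolls happen after only a few kilobytes of writes. A minimal sketch of setting those properties through the standard HBase Configuration API (illustrative only; the class name is made up, the values are the ones flagged in the WARNs):

// Illustrative sketch, not part of the captured log: configuring the tiny
// MAX_FILESIZE / MEMSTORE_FLUSHSIZE values that TableDescriptorChecker warns about,
// using the standard HBaseConfiguration API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class TinyRegionTestConfig {
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Far below production defaults, so regions split and memstores flush
        // after only a few KB of writes.
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // 768 KB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB
        return conf;
    }
}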
2024-12-04T21:48:07,654 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38031 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:48:07,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38031 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-04T21:48:07,683 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T21:48:07,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38031 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-04T21:48:07,687 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:07,691 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T21:48:07,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38031 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:48:07,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741835_1011 (size=389) 2024-12-04T21:48:07,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741835_1011 (size=389) 2024-12-04T21:48:07,739 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 01ac40557e8ffded3f648e294479984c, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be 2024-12-04T21:48:07,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741836_1012 (size=72) 2024-12-04T21:48:07,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741836_1012 (size=72) 2024-12-04T21:48:07,757 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:48:07,757 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 01ac40557e8ffded3f648e294479984c, disabling compactions & flushes 2024-12-04T21:48:07,757 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:48:07,757 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:48:07,757 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. after waiting 0 ms 2024-12-04T21:48:07,757 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:48:07,757 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:48:07,757 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 01ac40557e8ffded3f648e294479984c: Waiting for close lock at 1733348887757Disabling compacts and flushes for region at 1733348887757Disabling writes for close at 1733348887757Writing region close event to WAL at 1733348887757Closed at 1733348887757 2024-12-04T21:48:07,762 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T21:48:07,767 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733348887762"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733348887762"}]},"ts":"1733348887762"} 2024-12-04T21:48:07,773 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-04T21:48:07,775 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T21:48:07,779 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733348887776"}]},"ts":"1733348887776"} 2024-12-04T21:48:07,785 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-04T21:48:07,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=01ac40557e8ffded3f648e294479984c, ASSIGN}] 2024-12-04T21:48:07,792 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=01ac40557e8ffded3f648e294479984c, ASSIGN 2024-12-04T21:48:07,794 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=01ac40557e8ffded3f648e294479984c, ASSIGN; state=OFFLINE, location=bb3046a53f79,40855,1733348885169; forceNewPlan=false, retain=false 2024-12-04T21:48:07,946 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=01ac40557e8ffded3f648e294479984c, regionState=OPENING, regionLocation=bb3046a53f79,40855,1733348885169 2024-12-04T21:48:07,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=01ac40557e8ffded3f648e294479984c, ASSIGN because future has completed 2024-12-04T21:48:07,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 01ac40557e8ffded3f648e294479984c, server=bb3046a53f79,40855,1733348885169}] 2024-12-04T21:48:08,112 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 
2024-12-04T21:48:08,113 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 01ac40557e8ffded3f648e294479984c, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:48:08,113 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,113 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:48:08,113 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,114 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,117 INFO [StoreOpener-01ac40557e8ffded3f648e294479984c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,119 INFO [StoreOpener-01ac40557e8ffded3f648e294479984c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 01ac40557e8ffded3f648e294479984c columnFamilyName info 2024-12-04T21:48:08,119 DEBUG [StoreOpener-01ac40557e8ffded3f648e294479984c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:48:08,121 INFO [StoreOpener-01ac40557e8ffded3f648e294479984c-1 {}] regionserver.HStore(327): Store=01ac40557e8ffded3f648e294479984c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:48:08,121 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,122 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,123 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,124 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,124 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,128 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,131 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:48:08,132 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 01ac40557e8ffded3f648e294479984c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827272, jitterRate=0.05193169414997101}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T21:48:08,132 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:08,134 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 01ac40557e8ffded3f648e294479984c: Running coprocessor pre-open hook at 1733348888114Writing region info on filesystem at 1733348888114Initializing all the Stores at 1733348888116 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348888116Cleaning up temporary data from old regions at 1733348888124 (+8 ms)Running coprocessor post-open hooks at 1733348888133 (+9 ms)Region opened successfully at 1733348888134 (+1 ms) 2024-12-04T21:48:08,136 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c., pid=6, masterSystemTime=1733348888106 2024-12-04T21:48:08,140 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:48:08,140 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:48:08,141 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=01ac40557e8ffded3f648e294479984c, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,40855,1733348885169 2024-12-04T21:48:08,146 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 01ac40557e8ffded3f648e294479984c, server=bb3046a53f79,40855,1733348885169 because future has completed 2024-12-04T21:48:08,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T21:48:08,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 01ac40557e8ffded3f648e294479984c, server=bb3046a53f79,40855,1733348885169 in 197 msec 2024-12-04T21:48:08,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T21:48:08,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=01ac40557e8ffded3f648e294479984c, ASSIGN in 367 msec 2024-12-04T21:48:08,161 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T21:48:08,162 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733348888161"}]},"ts":"1733348888161"} 2024-12-04T21:48:08,166 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-04T21:48:08,168 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T21:48:08,172 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 510 msec 2024-12-04T21:48:12,677 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-04T21:48:12,737 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T21:48:12,739 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-04T21:48:14,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T21:48:14,833 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-04T21:48:14,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-04T21:48:14,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-04T21:48:14,835 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:48:14,835 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-04T21:48:14,835 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-04T21:48:14,836 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-04T21:48:17,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38031 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:48:17,734 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-04T21:48:17,738 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-04T21:48:17,744 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-04T21:48:17,745 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 
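Shortly after this the test rolls the region server's WAL (the "New stream slow monitor ... .1733348897746" and "Rolled WAL" entries below). In this run the roll is issued in-process on the Time-limited test thread; from an ordinary client an equivalent roll could be requested through the Admin API, sketched here under that assumption (the ServerName string is the region server seen in the log):

// Illustrative sketch, not from the log: asking a region server to roll its WAL
// via the public Admin API.
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            // Server name format is host,port,startcode, matching the RS in the log.
            ServerName rs = ServerName.valueOf("bb3046a53f79,40855,1733348885169");
            admin.rollWALWriter(rs);
        }
    }
}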
2024-12-04T21:48:17,747 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C40855%2C1733348885169.1733348897746 2024-12-04T21:48:17,757 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:17,757 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:17,757 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:17,757 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:17,757 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:17,758 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348886773 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348897746 2024-12-04T21:48:17,759 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35345:35345),(127.0.0.1/127.0.0.1:42155:42155)] 2024-12-04T21:48:17,759 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348886773 is not closed yet, will try archiving it next time 2024-12-04T21:48:17,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741833_1009 (size=451) 2024-12-04T21:48:17,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741833_1009 (size=451) 2024-12-04T21:48:17,763 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348886773 to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/oldWALs/bb3046a53f79%2C40855%2C1733348885169.1733348886773 2024-12-04T21:48:17,770 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c., hostname=bb3046a53f79,40855,1733348885169, seqNum=2] 2024-12-04T21:48:29,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40855 {}] regionserver.HRegion(8855): Flush requested on 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:29,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 01ac40557e8ffded3f648e294479984c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T21:48:29,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/a0026517501a41248367d9f759307857 is 1080, key is row0001/info:/1733348897773/Put/seqid=0 2024-12-04T21:48:29,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741838_1014 (size=12509) 2024-12-04T21:48:29,893 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741838_1014 (size=12509) 2024-12-04T21:48:29,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/a0026517501a41248367d9f759307857 2024-12-04T21:48:29,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/a0026517501a41248367d9f759307857 as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/a0026517501a41248367d9f759307857 2024-12-04T21:48:29,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/a0026517501a41248367d9f759307857, entries=7, sequenceid=11, filesize=12.2 K 2024-12-04T21:48:29,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 01ac40557e8ffded3f648e294479984c in 153ms, sequenceid=11, compaction requested=false 2024-12-04T21:48:29,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 01ac40557e8ffded3f648e294479984c: 2024-12-04T21:48:33,431 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-04T21:48:37,834 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C40855%2C1733348885169.1733348917834 2024-12-04T21:48:38,044 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK], DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK]] 2024-12-04T21:48:38,044 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:38,044 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:38,045 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:38,045 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:38,045 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:38,045 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348897746 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348917834 2024-12-04T21:48:38,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741837_1013 (size=12399) 2024-12-04T21:48:38,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741837_1013 (size=12399) 2024-12-04T21:48:38,057 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42155:42155),(127.0.0.1/127.0.0.1:35345:35345)] 2024-12-04T21:48:38,261 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:40,467 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:42,671 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:44,880 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40855 {}] 
regionserver.HRegion(8855): Flush requested on 01ac40557e8ffded3f648e294479984c 2024-12-04T21:48:44,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 01ac40557e8ffded3f648e294479984c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T21:48:45,086 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:45,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/4a1a398bd8b2425a93ba0505628fc04f is 1080, key is row0008/info:/1733348911818/Put/seqid=0 2024-12-04T21:48:45,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741840_1016 (size=12509) 2024-12-04T21:48:45,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741840_1016 (size=12509) 2024-12-04T21:48:45,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/4a1a398bd8b2425a93ba0505628fc04f 2024-12-04T21:48:45,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/4a1a398bd8b2425a93ba0505628fc04f as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/4a1a398bd8b2425a93ba0505628fc04f 2024-12-04T21:48:45,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/4a1a398bd8b2425a93ba0505628fc04f, entries=7, sequenceid=21, filesize=12.2 K 2024-12-04T21:48:45,329 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:45,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 01ac40557e8ffded3f648e294479984c in 448ms, sequenceid=21, compaction requested=false 2024-12-04T21:48:45,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 01ac40557e8ffded3f648e294479984c: 2024-12-04T21:48:45,330 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-04T21:48:45,330 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:48:45,331 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/a0026517501a41248367d9f759307857 because midkey is the same as first or last row 2024-12-04T21:48:47,087 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:47,465 INFO [master/bb3046a53f79:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-04T21:48:47,465 INFO [master/bb3046a53f79:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-04T21:48:49,292 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:49,298 WARN [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:49,300 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C40855%2C1733348885169:(num 1733348917834) roll requested 2024-12-04T21:48:49,301 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C40855%2C1733348885169.1733348929301 2024-12-04T21:48:49,510 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:49,510 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:49,510 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:49,511 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:49,511 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:49,511 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:48:49,511 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348917834 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348929301 2024-12-04T21:48:49,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741839_1015 (size=7739) 2024-12-04T21:48:49,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741839_1015 (size=7739) 2024-12-04T21:48:49,517 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42155:42155),(127.0.0.1/127.0.0.1:35345:35345)] 2024-12-04T21:48:49,517 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348917834 is not closed yet, will try archiving it next time 2024-12-04T21:48:49,517 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348897746 to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/oldWALs/bb3046a53f79%2C40855%2C1733348885169.1733348897746 2024-12-04T21:48:51,497 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:53,114 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 01ac40557e8ffded3f648e294479984c, had cached 0 bytes from a total of 25018 2024-12-04T21:48:53,706 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:55,912 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:48:58,117 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:49:00,120 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T21:49:00,122 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C40855%2C1733348885169.1733348940121 
2024-12-04T21:49:03,431 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T21:49:05,142 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5013 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:49:05,144 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK], DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK]] 2024-12-04T21:49:05,145 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C40855%2C1733348885169:(num 1733348940121) roll requested 2024-12-04T21:49:05,145 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:05,145 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:05,145 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:05,145 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:05,145 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:05,146 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348929301 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348940121 2024-12-04T21:49:05,147 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35345:35345),(127.0.0.1/127.0.0.1:42155:42155)] 2024-12-04T21:49:05,147 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348929301 is not closed yet, will try archiving it next time 2024-12-04T21:49:05,147 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C40855%2C1733348885169.1733348945147 2024-12-04T21:49:05,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741841_1017 (size=4753) 2024-12-04T21:49:05,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741841_1017 (size=4753) 2024-12-04T21:49:10,153 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5003 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK], DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK]] 2024-12-04T21:49:10,154 WARN [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1374): Requesting 
log roll because we exceeded slow sync threshold; time=5003 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK], DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK]] 2024-12-04T21:49:10,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40855 {}] regionserver.HRegion(8855): Flush requested on 01ac40557e8ffded3f648e294479984c 2024-12-04T21:49:10,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 01ac40557e8ffded3f648e294479984c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T21:49:10,161 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK], DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK]] 2024-12-04T21:49:10,162 WARN [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK], DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK]] 2024-12-04T21:49:12,156 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T21:49:15,158 INFO [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK], DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK]] 2024-12-04T21:49:15,158 WARN [FSHLog-0-hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be-prefix:bb3046a53f79,40855,1733348885169 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38409,DS-f7aef68b-5f10-4e07-8bc0-4a5221dee025,DISK], DatanodeInfoWithStorage[127.0.0.1:44691,DS-e946162c-4eaa-47a1-81c4-57bdc3a3dfad,DISK]] 2024-12-04T21:49:15,158 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,158 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,158 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,159 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,159 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,159 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348940121 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348945147 2024-12-04T21:49:15,161 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42155:42155),(127.0.0.1/127.0.0.1:35345:35345)] 2024-12-04T21:49:15,161 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348940121 is not closed yet, will try archiving it next time 2024-12-04T21:49:15,161 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C40855%2C1733348885169:(num 1733348955161) roll requested 2024-12-04T21:49:15,161 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C40855%2C1733348885169.1733348955161 2024-12-04T21:49:15,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741842_1018 (size=1569) 2024-12-04T21:49:15,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741842_1018 (size=1569) 2024-12-04T21:49:15,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/010609c72b1147a2a77f2f788a0714fb is 1080, key is row0015/info:/1733348926884/Put/seqid=0 2024-12-04T21:49:15,180 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741844_1020 (size=12509) 2024-12-04T21:49:15,180 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,180 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,180 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,180 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,181 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348945147 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348955161 2024-12-04T21:49:15,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741844_1020 (size=12509) 2024-12-04T21:49:15,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/010609c72b1147a2a77f2f788a0714fb 2024-12-04T21:49:15,182 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42155:42155),(127.0.0.1/127.0.0.1:35345:35345)] 2024-12-04T21:49:15,182 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348945147 is not closed yet, will try archiving it next time 2024-12-04T21:49:15,182 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C40855%2C1733348885169.1733348955182 2024-12-04T21:49:15,182 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741843_1019 (size=93) 2024-12-04T21:49:15,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741843_1019 (size=93) 2024-12-04T21:49:15,184 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348945147 to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/oldWALs/bb3046a53f79%2C40855%2C1733348885169.1733348945147 2024-12-04T21:49:15,191 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,191 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,191 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,192 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,192 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:15,192 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348955161 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/WALs/bb3046a53f79,40855,1733348885169/bb3046a53f79%2C40855%2C1733348885169.1733348955182 2024-12-04T21:49:15,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/010609c72b1147a2a77f2f788a0714fb as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/010609c72b1147a2a77f2f788a0714fb 2024-12-04T21:49:15,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741845_1021 (size=1258) 2024-12-04T21:49:15,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741845_1021 (size=1258) 2024-12-04T21:49:15,197 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42155:42155),(127.0.0.1/127.0.0.1:35345:35345)] 2024-12-04T21:49:15,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/010609c72b1147a2a77f2f788a0714fb, entries=7, sequenceid=31, filesize=12.2 K 2024-12-04T21:49:15,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for 01ac40557e8ffded3f648e294479984c in 5050ms, sequenceid=31, compaction requested=true 2024-12-04T21:49:15,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 01ac40557e8ffded3f648e294479984c: 2024-12-04T21:49:15,205 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough 
sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-04T21:49:15,205 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:15,205 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/a0026517501a41248367d9f759307857 because midkey is the same as first or last row 2024-12-04T21:49:15,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 01ac40557e8ffded3f648e294479984c:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T21:49:15,208 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:49:15,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:49:15,211 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T21:49:15,212 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.HStore(1541): 01ac40557e8ffded3f648e294479984c/info is initiating minor compaction (all files) 2024-12-04T21:49:15,213 INFO [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 01ac40557e8ffded3f648e294479984c/info in TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 
2024-12-04T21:49:15,213 INFO [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/a0026517501a41248367d9f759307857, hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/4a1a398bd8b2425a93ba0505628fc04f, hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/010609c72b1147a2a77f2f788a0714fb] into tmpdir=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp, totalSize=36.6 K 2024-12-04T21:49:15,214 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] compactions.Compactor(225): Compacting a0026517501a41248367d9f759307857, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733348897773 2024-12-04T21:49:15,215 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a1a398bd8b2425a93ba0505628fc04f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733348911818 2024-12-04T21:49:15,216 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] compactions.Compactor(225): Compacting 010609c72b1147a2a77f2f788a0714fb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733348926884 2024-12-04T21:49:15,243 INFO [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 01ac40557e8ffded3f648e294479984c#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:49:15,244 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/fbafe73596254fd8b5d4fb32cd67718a is 1080, key is row0001/info:/1733348897773/Put/seqid=0 2024-12-04T21:49:15,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741847_1023 (size=27710) 2024-12-04T21:49:15,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741847_1023 (size=27710) 2024-12-04T21:49:15,261 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/fbafe73596254fd8b5d4fb32cd67718a as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/fbafe73596254fd8b5d4fb32cd67718a 2024-12-04T21:49:15,277 INFO [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 01ac40557e8ffded3f648e294479984c/info of 01ac40557e8ffded3f648e294479984c into fbafe73596254fd8b5d4fb32cd67718a(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T21:49:15,277 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 01ac40557e8ffded3f648e294479984c: 2024-12-04T21:49:15,279 INFO [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c., storeName=01ac40557e8ffded3f648e294479984c/info, priority=13, startTime=1733348955206; duration=0sec 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/fbafe73596254fd8b5d4fb32cd67718a because midkey is the same as first or last row 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/fbafe73596254fd8b5d4fb32cd67718a because midkey is the same as first or last row 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/fbafe73596254fd8b5d4fb32cd67718a because midkey is the same as first or last row 2024-12-04T21:49:15,280 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:49:15,281 DEBUG [RS:0;bb3046a53f79:40855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 01ac40557e8ffded3f648e294479984c:info 2024-12-04T21:49:27,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40855 {}] regionserver.HRegion(8855): Flush requested on 01ac40557e8ffded3f648e294479984c 2024-12-04T21:49:27,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 01ac40557e8ffded3f648e294479984c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T21:49:27,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/1676a27aa90245c3916d51ccb45f5a6f is 1080, key is row0022/info:/1733348955184/Put/seqid=0 2024-12-04T21:49:27,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741848_1024 (size=12509) 2024-12-04T21:49:27,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741848_1024 (size=12509) 2024-12-04T21:49:27,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/1676a27aa90245c3916d51ccb45f5a6f 2024-12-04T21:49:27,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/1676a27aa90245c3916d51ccb45f5a6f as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/1676a27aa90245c3916d51ccb45f5a6f 2024-12-04T21:49:27,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/1676a27aa90245c3916d51ccb45f5a6f, entries=7, sequenceid=42, filesize=12.2 K 2024-12-04T21:49:27,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 01ac40557e8ffded3f648e294479984c in 34ms, sequenceid=42, compaction requested=false 2024-12-04T21:49:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 01ac40557e8ffded3f648e294479984c: 2024-12-04T21:49:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-04T21:49:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/fbafe73596254fd8b5d4fb32cd67718a because midkey is the same as first or last row 2024-12-04T21:49:33,432 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T21:49:35,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T21:49:35,252 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T21:49:35,253 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:49:35,263 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:35,263 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:35,263 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-04T21:49:35,264 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T21:49:35,264 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=118547021, stopped=false 2024-12-04T21:49:35,264 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bb3046a53f79,38031,1733348884408 2024-12-04T21:49:35,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:49:35,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:49:35,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:35,265 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:49:35,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:35,265 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T21:49:35,266 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:49:35,266 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:35,266 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:49:35,266 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bb3046a53f79,40855,1733348885169' ***** 2024-12-04T21:49:35,266 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:49:35,266 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T21:49:35,266 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T21:49:35,267 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T21:49:35,267 INFO [RS:0;bb3046a53f79:40855 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T21:49:35,267 INFO [RS:0;bb3046a53f79:40855 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T21:49:35,267 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(3091): Received CLOSE for 01ac40557e8ffded3f648e294479984c 2024-12-04T21:49:35,268 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(959): stopping server bb3046a53f79,40855,1733348885169 2024-12-04T21:49:35,268 INFO [RS:0;bb3046a53f79:40855 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:49:35,268 INFO [RS:0;bb3046a53f79:40855 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bb3046a53f79:40855. 
2024-12-04T21:49:35,268 DEBUG [RS:0;bb3046a53f79:40855 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:49:35,268 DEBUG [RS:0;bb3046a53f79:40855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:35,268 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 01ac40557e8ffded3f648e294479984c, disabling compactions & flushes 2024-12-04T21:49:35,268 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:49:35,268 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:49:35,268 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. after waiting 0 ms 2024-12-04T21:49:35,268 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T21:49:35,268 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:49:35,268 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T21:49:35,268 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-04T21:49:35,269 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T21:49:35,269 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 01ac40557e8ffded3f648e294479984c 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-04T21:49:35,269 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:49:35,269 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:49:35,269 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:49:35,269 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-04T21:49:35,269 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:49:35,269 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:49:35,269 DEBUG [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 01ac40557e8ffded3f648e294479984c=TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.} 2024-12-04T21:49:35,269 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-04T21:49:35,270 DEBUG [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(1351): Waiting on 01ac40557e8ffded3f648e294479984c, 1588230740 2024-12-04T21:49:35,274 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/b2b149bc18624cc695862974ffe5e121 is 1080, key is row0029/info:/1733348969232/Put/seqid=0 2024-12-04T21:49:35,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741849_1025 (size=8193) 2024-12-04T21:49:35,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741849_1025 (size=8193) 2024-12-04T21:49:35,286 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/b2b149bc18624cc695862974ffe5e121 2024-12-04T21:49:35,290 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/.tmp/info/d464905871da49e4a185ef36ea6db0d2 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c./info:regioninfo/1733348888141/Put/seqid=0 2024-12-04T21:49:35,296 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/.tmp/info/b2b149bc18624cc695862974ffe5e121 as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/b2b149bc18624cc695862974ffe5e121 2024-12-04T21:49:35,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741850_1026 (size=7016) 2024-12-04T21:49:35,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741850_1026 (size=7016) 2024-12-04T21:49:35,299 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/.tmp/info/d464905871da49e4a185ef36ea6db0d2 2024-12-04T21:49:35,306 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/b2b149bc18624cc695862974ffe5e121, entries=3, sequenceid=48, filesize=8.0 K 2024-12-04T21:49:35,307 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 01ac40557e8ffded3f648e294479984c in 39ms, sequenceid=48, compaction requested=true 2024-12-04T21:49:35,308 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/a0026517501a41248367d9f759307857, hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/4a1a398bd8b2425a93ba0505628fc04f, hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/010609c72b1147a2a77f2f788a0714fb] to archive 2024-12-04T21:49:35,311 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T21:49:35,314 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/a0026517501a41248367d9f759307857 to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/archive/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/a0026517501a41248367d9f759307857 2024-12-04T21:49:35,316 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/4a1a398bd8b2425a93ba0505628fc04f to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/archive/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/4a1a398bd8b2425a93ba0505628fc04f 2024-12-04T21:49:35,318 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/010609c72b1147a2a77f2f788a0714fb to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/archive/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/info/010609c72b1147a2a77f2f788a0714fb 2024-12-04T21:49:35,322 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/.tmp/ns/b096f40a20c54cdf912a9f57c64afdfc is 43, key is default/ns:d/1733348887344/Put/seqid=0 2024-12-04T21:49:35,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741851_1027 (size=5153) 2024-12-04T21:49:35,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741851_1027 (size=5153) 2024-12-04T21:49:35,330 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/.tmp/ns/b096f40a20c54cdf912a9f57c64afdfc 2024-12-04T21:49:35,330 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=bb3046a53f79:38031 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-04T21:49:35,331 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a0026517501a41248367d9f759307857=12509, 4a1a398bd8b2425a93ba0505628fc04f=12509, 010609c72b1147a2a77f2f788a0714fb=12509] 2024-12-04T21:49:35,337 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/default/TestLogRolling-testSlowSyncLogRolling/01ac40557e8ffded3f648e294479984c/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-04T21:49:35,340 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 2024-12-04T21:49:35,340 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 01ac40557e8ffded3f648e294479984c: Waiting for close lock at 1733348975268Running coprocessor pre-close hooks at 1733348975268Disabling compacts and flushes for region at 1733348975268Disabling writes for close at 1733348975268Obtaining lock to block concurrent updates at 1733348975269 (+1 ms)Preparing flush snapshotting stores in 01ac40557e8ffded3f648e294479984c at 1733348975269Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733348975269Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. at 1733348975270 (+1 ms)Flushing 01ac40557e8ffded3f648e294479984c/info: creating writer at 1733348975270Flushing 01ac40557e8ffded3f648e294479984c/info: appending metadata at 1733348975274 (+4 ms)Flushing 01ac40557e8ffded3f648e294479984c/info: closing flushed file at 1733348975274Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f299104: reopening flushed file at 1733348975295 (+21 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 01ac40557e8ffded3f648e294479984c in 39ms, sequenceid=48, compaction requested=true at 1733348975307 (+12 ms)Writing region close event to WAL at 1733348975332 (+25 ms)Running coprocessor post-close hooks at 1733348975338 (+6 ms)Closed at 1733348975339 (+1 ms) 2024-12-04T21:49:35,341 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733348887649.01ac40557e8ffded3f648e294479984c. 
2024-12-04T21:49:35,356 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/.tmp/table/1ddeb4e99b724c86a3e6fb3bbd2f0d4a is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733348888161/Put/seqid=0 2024-12-04T21:49:35,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741852_1028 (size=5396) 2024-12-04T21:49:35,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741852_1028 (size=5396) 2024-12-04T21:49:35,368 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/.tmp/table/1ddeb4e99b724c86a3e6fb3bbd2f0d4a 2024-12-04T21:49:35,377 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/.tmp/info/d464905871da49e4a185ef36ea6db0d2 as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/info/d464905871da49e4a185ef36ea6db0d2 2024-12-04T21:49:35,384 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/info/d464905871da49e4a185ef36ea6db0d2, entries=10, sequenceid=11, filesize=6.9 K 2024-12-04T21:49:35,386 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/.tmp/ns/b096f40a20c54cdf912a9f57c64afdfc as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/ns/b096f40a20c54cdf912a9f57c64afdfc 2024-12-04T21:49:35,394 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/ns/b096f40a20c54cdf912a9f57c64afdfc, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T21:49:35,395 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/.tmp/table/1ddeb4e99b724c86a3e6fb3bbd2f0d4a as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/table/1ddeb4e99b724c86a3e6fb3bbd2f0d4a 2024-12-04T21:49:35,403 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/table/1ddeb4e99b724c86a3e6fb3bbd2f0d4a, entries=2, sequenceid=11, filesize=5.3 K 2024-12-04T21:49:35,405 INFO 
[RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false 2024-12-04T21:49:35,411 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-04T21:49:35,411 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:49:35,412 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:49:35,412 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733348975269Running coprocessor pre-close hooks at 1733348975269Disabling compacts and flushes for region at 1733348975269Disabling writes for close at 1733348975269Obtaining lock to block concurrent updates at 1733348975269Preparing flush snapshotting stores in 1588230740 at 1733348975269Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733348975270 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733348975271 (+1 ms)Flushing 1588230740/info: creating writer at 1733348975271Flushing 1588230740/info: appending metadata at 1733348975290 (+19 ms)Flushing 1588230740/info: closing flushed file at 1733348975290Flushing 1588230740/ns: creating writer at 1733348975307 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733348975322 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733348975322Flushing 1588230740/table: creating writer at 1733348975340 (+18 ms)Flushing 1588230740/table: appending metadata at 1733348975356 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733348975356Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f5e0ba8: reopening flushed file at 1733348975376 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b8c88d6: reopening flushed file at 1733348975384 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b1bf5e: reopening flushed file at 1733348975394 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1733348975405 (+11 ms)Writing region close event to WAL at 1733348975406 (+1 ms)Running coprocessor post-close hooks at 1733348975411 (+5 ms)Closed at 1733348975411 2024-12-04T21:49:35,412 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T21:49:35,470 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(976): stopping server bb3046a53f79,40855,1733348885169; all regions closed. 
2024-12-04T21:49:35,473 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,474 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,474 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,474 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,475 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741834_1010 (size=3066) 2024-12-04T21:49:35,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741834_1010 (size=3066) 2024-12-04T21:49:35,485 DEBUG [RS:0;bb3046a53f79:40855 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/oldWALs 2024-12-04T21:49:35,485 INFO [RS:0;bb3046a53f79:40855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C40855%2C1733348885169.meta:.meta(num 1733348887118) 2024-12-04T21:49:35,486 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,486 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,486 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,486 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,486 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741846_1022 (size=13040) 2024-12-04T21:49:35,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741846_1022 (size=13040) 2024-12-04T21:49:35,498 DEBUG [RS:0;bb3046a53f79:40855 {}] wal.AbstractFSWAL(1256): Moved 5 WAL file(s) to /user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/oldWALs 2024-12-04T21:49:35,498 INFO [RS:0;bb3046a53f79:40855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C40855%2C1733348885169:(num 1733348955182) 2024-12-04T21:49:35,498 DEBUG [RS:0;bb3046a53f79:40855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:35,498 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:49:35,498 INFO [RS:0;bb3046a53f79:40855 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:49:35,498 INFO [RS:0;bb3046a53f79:40855 {}] hbase.ChoreService(370): Chore service for: regionserver/bb3046a53f79:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-04T21:49:35,498 INFO [RS:0;bb3046a53f79:40855 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:49:35,498 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T21:49:35,499 INFO [RS:0;bb3046a53f79:40855 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40855 2024-12-04T21:49:35,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bb3046a53f79,40855,1733348885169 2024-12-04T21:49:35,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:49:35,502 INFO [RS:0;bb3046a53f79:40855 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:49:35,503 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bb3046a53f79,40855,1733348885169] 2024-12-04T21:49:35,504 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bb3046a53f79,40855,1733348885169 already deleted, retry=false 2024-12-04T21:49:35,504 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bb3046a53f79,40855,1733348885169 expired; onlineServers=0 2024-12-04T21:49:35,504 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bb3046a53f79,38031,1733348884408' ***** 2024-12-04T21:49:35,504 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T21:49:35,504 INFO [M:0;bb3046a53f79:38031 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:49:35,504 INFO [M:0;bb3046a53f79:38031 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:49:35,504 DEBUG [M:0;bb3046a53f79:38031 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T21:49:35,505 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T21:49:35,505 DEBUG [M:0;bb3046a53f79:38031 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T21:49:35,505 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733348886386 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733348886386,5,FailOnTimeoutGroup] 2024-12-04T21:49:35,505 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733348886389 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733348886389,5,FailOnTimeoutGroup] 2024-12-04T21:49:35,505 INFO [M:0;bb3046a53f79:38031 {}] hbase.ChoreService(370): Chore service for: master/bb3046a53f79:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T21:49:35,505 INFO [M:0;bb3046a53f79:38031 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:49:35,505 DEBUG [M:0;bb3046a53f79:38031 {}] master.HMaster(1795): Stopping service threads 2024-12-04T21:49:35,505 INFO [M:0;bb3046a53f79:38031 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T21:49:35,505 INFO [M:0;bb3046a53f79:38031 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:49:35,506 INFO [M:0;bb3046a53f79:38031 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T21:49:35,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T21:49:35,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:35,506 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-04T21:49:35,506 DEBUG [M:0;bb3046a53f79:38031 {}] zookeeper.ZKUtil(347): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T21:49:35,507 WARN [M:0;bb3046a53f79:38031 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T21:49:35,508 INFO [M:0;bb3046a53f79:38031 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/.lastflushedseqids 2024-12-04T21:49:35,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741853_1029 (size=130) 2024-12-04T21:49:35,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741853_1029 (size=130) 2024-12-04T21:49:35,521 INFO [M:0;bb3046a53f79:38031 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T21:49:35,521 INFO [M:0;bb3046a53f79:38031 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T21:49:35,522 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:49:35,522 INFO [M:0;bb3046a53f79:38031 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:35,522 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:35,522 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:49:35,522 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T21:49:35,522 INFO [M:0;bb3046a53f79:38031 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-12-04T21:49:35,538 DEBUG [M:0;bb3046a53f79:38031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9a42597b33834d4193029e6828dd12a3 is 82, key is hbase:meta,,1/info:regioninfo/1733348887251/Put/seqid=0 2024-12-04T21:49:35,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741854_1030 (size=5672) 2024-12-04T21:49:35,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741854_1030 (size=5672) 2024-12-04T21:49:35,544 INFO [M:0;bb3046a53f79:38031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9a42597b33834d4193029e6828dd12a3 2024-12-04T21:49:35,567 DEBUG [M:0;bb3046a53f79:38031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/665f99ebe3b840889924fde17200cbcc is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733348888171/Put/seqid=0 2024-12-04T21:49:35,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741855_1031 (size=6247) 2024-12-04T21:49:35,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741855_1031 (size=6247) 2024-12-04T21:49:35,574 INFO [M:0;bb3046a53f79:38031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/665f99ebe3b840889924fde17200cbcc 2024-12-04T21:49:35,581 INFO [M:0;bb3046a53f79:38031 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 665f99ebe3b840889924fde17200cbcc 2024-12-04T21:49:35,596 DEBUG [M:0;bb3046a53f79:38031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e41283f17dfa4b55a76e877b0c05cabd is 69, key is bb3046a53f79,40855,1733348885169/rs:state/1733348886531/Put/seqid=0 2024-12-04T21:49:35,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741856_1032 (size=5156) 2024-12-04T21:49:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741856_1032 (size=5156) 2024-12-04T21:49:35,602 INFO [M:0;bb3046a53f79:38031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e41283f17dfa4b55a76e877b0c05cabd 2024-12-04T21:49:35,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:49:35,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40855-0x100a73458a10001, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:49:35,604 INFO [RS:0;bb3046a53f79:40855 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:49:35,605 INFO [RS:0;bb3046a53f79:40855 {}] regionserver.HRegionServer(1031): Exiting; stopping=bb3046a53f79,40855,1733348885169; zookeeper connection closed. 2024-12-04T21:49:35,605 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1eae73c0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1eae73c0 2024-12-04T21:49:35,606 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T21:49:35,623 DEBUG [M:0;bb3046a53f79:38031 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7b3b46d76d944acdb1d1d04c5e45256f is 52, key is load_balancer_on/state:d/1733348887626/Put/seqid=0 2024-12-04T21:49:35,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741857_1033 (size=5056) 2024-12-04T21:49:35,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741857_1033 (size=5056) 2024-12-04T21:49:35,630 INFO [M:0;bb3046a53f79:38031 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7b3b46d76d944acdb1d1d04c5e45256f 2024-12-04T21:49:35,638 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9a42597b33834d4193029e6828dd12a3 as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9a42597b33834d4193029e6828dd12a3 2024-12-04T21:49:35,645 INFO [M:0;bb3046a53f79:38031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9a42597b33834d4193029e6828dd12a3, entries=8, sequenceid=59, filesize=5.5 K 2024-12-04T21:49:35,646 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/665f99ebe3b840889924fde17200cbcc as 
hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/665f99ebe3b840889924fde17200cbcc 2024-12-04T21:49:35,652 INFO [M:0;bb3046a53f79:38031 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 665f99ebe3b840889924fde17200cbcc 2024-12-04T21:49:35,652 INFO [M:0;bb3046a53f79:38031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/665f99ebe3b840889924fde17200cbcc, entries=6, sequenceid=59, filesize=6.1 K 2024-12-04T21:49:35,653 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e41283f17dfa4b55a76e877b0c05cabd as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e41283f17dfa4b55a76e877b0c05cabd 2024-12-04T21:49:35,660 INFO [M:0;bb3046a53f79:38031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e41283f17dfa4b55a76e877b0c05cabd, entries=1, sequenceid=59, filesize=5.0 K 2024-12-04T21:49:35,661 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7b3b46d76d944acdb1d1d04c5e45256f as hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7b3b46d76d944acdb1d1d04c5e45256f 2024-12-04T21:49:35,668 INFO [M:0;bb3046a53f79:38031 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7b3b46d76d944acdb1d1d04c5e45256f, entries=1, sequenceid=59, filesize=4.9 K 2024-12-04T21:49:35,669 INFO [M:0;bb3046a53f79:38031 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=59, compaction requested=false 2024-12-04T21:49:35,671 INFO [M:0;bb3046a53f79:38031 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:35,671 DEBUG [M:0;bb3046a53f79:38031 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733348975522Disabling compacts and flushes for region at 1733348975522Disabling writes for close at 1733348975522Obtaining lock to block concurrent updates at 1733348975522Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733348975522Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1733348975523 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733348975523Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733348975523Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733348975537 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733348975537Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733348975551 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733348975567 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733348975567Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733348975581 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733348975595 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733348975595Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733348975609 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733348975622 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733348975623 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b1a0d77: reopening flushed file at 1733348975637 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@296a7f1f: reopening flushed file at 1733348975645 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b71c4c3: reopening flushed file at 1733348975652 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@733a1132: reopening flushed file at 1733348975660 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=59, compaction requested=false at 1733348975669 (+9 ms)Writing region close event to WAL at 1733348975671 (+2 ms)Closed at 1733348975671 2024-12-04T21:49:35,672 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,672 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,672 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,672 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,673 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:35,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44691 is added to blk_1073741830_1006 (size=27973) 2024-12-04T21:49:35,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38409 is added to blk_1073741830_1006 (size=27973) 2024-12-04T21:49:35,676 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T21:49:35,676 INFO [M:0;bb3046a53f79:38031 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-04T21:49:35,677 INFO [M:0;bb3046a53f79:38031 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38031 2024-12-04T21:49:35,677 INFO [M:0;bb3046a53f79:38031 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:49:35,778 INFO [M:0;bb3046a53f79:38031 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:49:35,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:49:35,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38031-0x100a73458a10000, quorum=127.0.0.1:61970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:49:35,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55d18735{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:35,793 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:35,793 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:35,794 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:35,794 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:35,796 WARN [BP-1720806833-172.17.0.2-1733348881426 heartbeating to localhost/127.0.0.1:44629 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:49:35,796 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:49:35,796 WARN [BP-1720806833-172.17.0.2-1733348881426 heartbeating to localhost/127.0.0.1:44629 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1720806833-172.17.0.2-1733348881426 (Datanode Uuid df2a8dda-e6c2-4c6c-bc03-7b7c5b284ee9) service to localhost/127.0.0.1:44629 2024-12-04T21:49:35,796 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:49:35,798 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/data/data3/current/BP-1720806833-172.17.0.2-1733348881426 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:35,798 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/data/data4/current/BP-1720806833-172.17.0.2-1733348881426 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:35,799 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:49:35,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59e63bea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:35,801 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:35,801 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:35,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:35,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:35,803 WARN [BP-1720806833-172.17.0.2-1733348881426 heartbeating to localhost/127.0.0.1:44629 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:49:35,803 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:49:35,803 WARN [BP-1720806833-172.17.0.2-1733348881426 heartbeating to localhost/127.0.0.1:44629 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1720806833-172.17.0.2-1733348881426 (Datanode Uuid 94c3cc5b-9a88-4370-a92f-eef8d8f124e4) service to localhost/127.0.0.1:44629 2024-12-04T21:49:35,803 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:49:35,803 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/data/data1/current/BP-1720806833-172.17.0.2-1733348881426 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:35,803 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/cluster_44c17f57-554f-29e5-d568-96d0ee9a2f1c/data/data2/current/BP-1720806833-172.17.0.2-1733348881426 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:35,804 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:49:35,812 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:49:35,813 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:35,813 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:35,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:35,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:35,820 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T21:49:35,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T21:49:35,857 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/bb3046a53f79:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/bb3046a53f79:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44629 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:44629 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@44be2bb java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44629 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging 
thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:44629 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44629 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44629 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:44629 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/bb3046a53f79:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44629 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/bb3046a53f79:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=210 (was 510), ProcessCount=11 (was 11), AvailableMemoryMB=3345 (was 4222) 2024-12-04T21:49:35,862 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=210, ProcessCount=11, AvailableMemoryMB=3345 2024-12-04T21:49:35,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T21:49:35,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.log.dir so I do NOT create it in target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4 2024-12-04T21:49:35,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/48b33a91-238b-1628-602e-6e3c0e2555f5/hadoop.tmp.dir so I do NOT create it in target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4 2024-12-04T21:49:35,863 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e, deleteOnExit=true 2024-12-04T21:49:35,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T21:49:35,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/test.cache.data in system properties and HBase conf 2024-12-04T21:49:35,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T21:49:35,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.log.dir in system properties and HBase conf 2024-12-04T21:49:35,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T21:49:35,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T21:49:35,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T21:49:35,864 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-04T21:49:35,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:49:35,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:49:35,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T21:49:35,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:49:35,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T21:49:35,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T21:49:35,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:49:35,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:49:35,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T21:49:35,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/nfs.dump.dir in system properties and HBase conf 2024-12-04T21:49:35,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/java.io.tmpdir in system properties and HBase conf 2024-12-04T21:49:35,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:49:35,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T21:49:35,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T21:49:35,879 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:49:35,934 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:35,939 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:49:35,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:49:35,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:49:35,941 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:49:35,942 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:35,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15baacc3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:49:35,943 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@698644f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:49:36,036 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@495bd725{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/java.io.tmpdir/jetty-localhost-46627-hadoop-hdfs-3_4_1-tests_jar-_-any-10876066129036372103/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:49:36,037 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33c6588a{HTTP/1.1, (http/1.1)}{localhost:46627} 2024-12-04T21:49:36,037 INFO [Time-limited test {}] server.Server(415): Started @96967ms 2024-12-04T21:49:36,049 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:49:36,101 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:36,105 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:49:36,106 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:49:36,106 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:49:36,106 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:49:36,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12a8548c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:49:36,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@290f57e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:49:36,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25713898{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/java.io.tmpdir/jetty-localhost-44433-hadoop-hdfs-3_4_1-tests_jar-_-any-2523228380348544036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:36,205 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2723d3ef{HTTP/1.1, (http/1.1)}{localhost:44433} 2024-12-04T21:49:36,205 INFO [Time-limited test {}] server.Server(415): Started @97135ms 2024-12-04T21:49:36,207 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:49:36,242 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:36,246 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:49:36,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:49:36,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:49:36,247 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:49:36,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e8eeadd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:49:36,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a6deee4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:49:36,269 WARN [Thread-431 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/data/data2/current/BP-1659538331-172.17.0.2-1733348975890/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:36,269 WARN [Thread-430 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/data/data1/current/BP-1659538331-172.17.0.2-1733348975890/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:36,288 WARN [Thread-409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:49:36,290 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7154af7104536a5e with lease ID 0x8983d37df9141409: Processing first storage report for DS-618fc56f-a4f5-4f09-8910-e5bb379c8969 from datanode DatanodeRegistration(127.0.0.1:33261, datanodeUuid=62729c45-34fd-48d1-b75f-284e658877ee, infoPort=42639, infoSecurePort=0, ipcPort=41009, storageInfo=lv=-57;cid=testClusterID;nsid=852900336;c=1733348975890) 2024-12-04T21:49:36,290 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7154af7104536a5e with lease ID 0x8983d37df9141409: from storage DS-618fc56f-a4f5-4f09-8910-e5bb379c8969 node DatanodeRegistration(127.0.0.1:33261, datanodeUuid=62729c45-34fd-48d1-b75f-284e658877ee, infoPort=42639, infoSecurePort=0, ipcPort=41009, storageInfo=lv=-57;cid=testClusterID;nsid=852900336;c=1733348975890), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:36,291 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7154af7104536a5e with lease ID 0x8983d37df9141409: Processing first storage report for DS-2f4b44a2-a3dd-4298-ad48-09f0e3e8c58d from datanode DatanodeRegistration(127.0.0.1:33261, datanodeUuid=62729c45-34fd-48d1-b75f-284e658877ee, infoPort=42639, infoSecurePort=0, ipcPort=41009, storageInfo=lv=-57;cid=testClusterID;nsid=852900336;c=1733348975890) 2024-12-04T21:49:36,291 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7154af7104536a5e with lease ID 0x8983d37df9141409: from storage DS-2f4b44a2-a3dd-4298-ad48-09f0e3e8c58d node DatanodeRegistration(127.0.0.1:33261, datanodeUuid=62729c45-34fd-48d1-b75f-284e658877ee, infoPort=42639, infoSecurePort=0, ipcPort=41009, storageInfo=lv=-57;cid=testClusterID;nsid=852900336;c=1733348975890), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:36,346 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@340f7bbb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/java.io.tmpdir/jetty-localhost-33309-hadoop-hdfs-3_4_1-tests_jar-_-any-12686690983165669457/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:36,346 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@343123be{HTTP/1.1, (http/1.1)}{localhost:33309} 2024-12-04T21:49:36,346 INFO [Time-limited test {}] server.Server(415): Started @97276ms 2024-12-04T21:49:36,348 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T21:49:36,410 WARN [Thread-456 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/data/data3/current/BP-1659538331-172.17.0.2-1733348975890/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:36,410 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/data/data4/current/BP-1659538331-172.17.0.2-1733348975890/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:36,425 WARN [Thread-445 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:49:36,428 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4bc5251fbcf8e06f with lease ID 0x8983d37df914140a: Processing first storage report for DS-932f220d-9cdd-4f0a-aee0-7d55a59c0250 from datanode DatanodeRegistration(127.0.0.1:40335, datanodeUuid=5a41869d-d51c-439a-8fcf-46d30a6b7e9a, infoPort=34805, infoSecurePort=0, ipcPort=40619, storageInfo=lv=-57;cid=testClusterID;nsid=852900336;c=1733348975890) 2024-12-04T21:49:36,428 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4bc5251fbcf8e06f with lease ID 0x8983d37df914140a: from storage DS-932f220d-9cdd-4f0a-aee0-7d55a59c0250 node DatanodeRegistration(127.0.0.1:40335, datanodeUuid=5a41869d-d51c-439a-8fcf-46d30a6b7e9a, infoPort=34805, infoSecurePort=0, ipcPort=40619, storageInfo=lv=-57;cid=testClusterID;nsid=852900336;c=1733348975890), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:36,428 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4bc5251fbcf8e06f with lease ID 0x8983d37df914140a: Processing first storage report for DS-92b364af-2eb4-402c-863d-94067f697594 from datanode DatanodeRegistration(127.0.0.1:40335, datanodeUuid=5a41869d-d51c-439a-8fcf-46d30a6b7e9a, infoPort=34805, infoSecurePort=0, ipcPort=40619, storageInfo=lv=-57;cid=testClusterID;nsid=852900336;c=1733348975890) 2024-12-04T21:49:36,428 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4bc5251fbcf8e06f with lease ID 0x8983d37df914140a: from storage DS-92b364af-2eb4-402c-863d-94067f697594 node DatanodeRegistration(127.0.0.1:40335, datanodeUuid=5a41869d-d51c-439a-8fcf-46d30a6b7e9a, infoPort=34805, infoSecurePort=0, ipcPort=40619, storageInfo=lv=-57;cid=testClusterID;nsid=852900336;c=1733348975890), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:36,475 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4 2024-12-04T21:49:36,477 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/zookeeper_0, clientPort=49844, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T21:49:36,478 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49844 2024-12-04T21:49:36,478 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:36,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:49:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:49:36,493 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305 with version=8 2024-12-04T21:49:36,493 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/hbase-staging 2024-12-04T21:49:36,495 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:49:36,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:36,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:36,496 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:49:36,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:36,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:49:36,496 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T21:49:36,496 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:49:36,497 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46197 2024-12-04T21:49:36,498 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46197 connecting to ZooKeeper ensemble=127.0.0.1:49844 2024-12-04T21:49:36,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:461970x0, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:49:36,503 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46197-0x100a735c3cb0000 connected 2024-12-04T21:49:36,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:36,517 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:36,522 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:49:36,522 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305, hbase.cluster.distributed=false 2024-12-04T21:49:36,525 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:49:36,525 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46197 2024-12-04T21:49:36,525 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46197 2024-12-04T21:49:36,526 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46197 2024-12-04T21:49:36,526 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46197 2024-12-04T21:49:36,526 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46197 2024-12-04T21:49:36,544 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:49:36,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:36,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:36,544 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:49:36,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:36,545 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:49:36,545 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T21:49:36,545 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:49:36,546 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46763 2024-12-04T21:49:36,547 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46763 connecting to ZooKeeper ensemble=127.0.0.1:49844 2024-12-04T21:49:36,548 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:36,551 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:36,557 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467630x0, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:49:36,557 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46763-0x100a735c3cb0001 connected 2024-12-04T21:49:36,557 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:49:36,558 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T21:49:36,559 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T21:49:36,559 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T21:49:36,560 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:49:36,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46763 2024-12-04T21:49:36,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46763 2024-12-04T21:49:36,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46763 2024-12-04T21:49:36,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46763 2024-12-04T21:49:36,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46763 
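The RpcExecutor lines above (handlerCount=3, maxQueueLength=30, readHandlers=2, writeHandlers=1) are derived from the standard HBase IPC settings. The sketch below shows the usual knobs on a Configuration object; the property names are the stock hbase.regionserver.handler.count / hbase.ipc.server.* keys, the values merely echo the numbers in the log, and whether this particular test sets them explicitly is not shown here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Handler threads per call queue; the run above uses 3.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Split the priority queue into read and write handlers
        // (the log shows writeHandlers=1, readHandlers=2).
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
        // Cap on queued calls; by default the limit scales with the handler
        // count, which is consistent with maxQueueLength=30 above.
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
        System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }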
2024-12-04T21:49:36,579 DEBUG [M:0;bb3046a53f79:46197 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bb3046a53f79:46197 2024-12-04T21:49:36,580 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bb3046a53f79,46197,1733348976495 2024-12-04T21:49:36,581 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:49:36,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:49:36,582 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bb3046a53f79,46197,1733348976495 2024-12-04T21:49:36,583 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T21:49:36,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,583 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,583 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T21:49:36,584 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bb3046a53f79,46197,1733348976495 from backup master directory 2024-12-04T21:49:36,585 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:49:36,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bb3046a53f79,46197,1733348976495 2024-12-04T21:49:36,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:49:36,585 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
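The backup-master registration and active-master election above are ordinary ZooKeeper znode operations under /hbase. A minimal sketch with the stock ZooKeeper client follows; the quorum address is copied from the log and the znode layout (/hbase/master, /hbase/backup-masters) is the one the watcher events show, but this is a read-only illustration, not HBase's own election code.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher.Event.KeeperState;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZnodeSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum and client port taken from the log above; purely illustrative.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49844", 30000, event -> {
          if (event.getState() == KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // The active master holds an ephemeral znode at /hbase/master; backup
        // masters park under /hbase/backup-masters until they take over.
        byte[] data = zk.getData("/hbase/master", false, null);
        System.out.println("/hbase/master holds " + data.length + " bytes (PB-encoded server name)");
        zk.close();
      }
    }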
2024-12-04T21:49:36,585 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bb3046a53f79,46197,1733348976495 2024-12-04T21:49:36,590 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/hbase.id] with ID: 7f8dd09b-d566-4e10-acfb-96bbdbdbc043 2024-12-04T21:49:36,590 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/.tmp/hbase.id 2024-12-04T21:49:36,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:49:36,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:49:36,599 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/.tmp/hbase.id]:[hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/hbase.id] 2024-12-04T21:49:36,611 INFO [regionserver/bb3046a53f79:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:49:36,617 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:36,617 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T21:49:36,619 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
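The cluster ID handling above writes hbase.id to a .tmp location first and then moves it into place, so readers never see a partially written file. The sketch below shows that write-then-rename pattern with the plain Hadoop FileSystem API; the paths and the UUID are copied from the log purely for illustration.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // fs.defaultFS would point at the mini cluster, e.g. hdfs://localhost:39903 above.
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // illustrative paths
        Path dst = new Path("/user/jenkins/test-data/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("7f8dd09b-d566-4e10-acfb-96bbdbdbc043".getBytes(StandardCharsets.UTF_8));
        }
        // Rename last, so the final file only ever appears fully written.
        fs.rename(tmp, dst);
      }
    }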
2024-12-04T21:49:36,621 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:49:36,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:49:36,634 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:49:36,635 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T21:49:36,636 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:49:36,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:49:36,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:49:36,649 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store 2024-12-04T21:49:36,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:49:36,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:49:36,658 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:36,658 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:49:36,658 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:36,658 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:36,658 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:49:36,659 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:36,659 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
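The long descriptor dump above (families info/proc/rs/state with VERSIONS, IN_MEMORY, BLOCKSIZE, DATA_BLOCK_ENCODING, BLOOMFILTER attributes) corresponds to what the public descriptor builder API produces. Below is a sketch that reconstructs just the 'info' family as printed; it uses ColumnFamilyDescriptorBuilder/TableDescriptorBuilder from the HBase client and is only an approximation of the internal master:store descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the descriptor above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();
        TableDescriptor store = TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();
        System.out.println(store);
      }
    }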
2024-12-04T21:49:36,659 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733348976658Disabling compacts and flushes for region at 1733348976658Disabling writes for close at 1733348976658Writing region close event to WAL at 1733348976659 (+1 ms)Closed at 1733348976659 2024-12-04T21:49:36,660 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/.initializing 2024-12-04T21:49:36,660 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/WALs/bb3046a53f79,46197,1733348976495 2024-12-04T21:49:36,664 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C46197%2C1733348976495, suffix=, logDir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/WALs/bb3046a53f79,46197,1733348976495, archiveDir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/oldWALs, maxLogs=10 2024-12-04T21:49:36,664 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46197%2C1733348976495.1733348976664 2024-12-04T21:49:36,671 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/WALs/bb3046a53f79,46197,1733348976495/bb3046a53f79%2C46197%2C1733348976495.1733348976664 2024-12-04T21:49:36,672 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34805:34805),(127.0.0.1/127.0.0.1:42639:42639)] 2024-12-04T21:49:36,672 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:49:36,673 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:36,673 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,673 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,679 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T21:49:36,679 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:36,680 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:36,680 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T21:49:36,682 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:36,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:49:36,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T21:49:36,685 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:36,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:49:36,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T21:49:36,687 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:36,688 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:49:36,688 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,689 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,689 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,691 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,691 DEBUG [master/bb3046a53f79:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,691 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T21:49:36,692 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:36,695 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:49:36,695 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853681, jitterRate=0.08551231026649475}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T21:49:36,696 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733348976673Initializing all the Stores at 1733348976674 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348976674Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348976677 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348976677Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348976677Cleaning up temporary data from old regions at 1733348976691 (+14 ms)Region opened successfully at 1733348976696 (+5 ms) 2024-12-04T21:49:36,696 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T21:49:36,700 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4333873f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:49:36,701 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T21:49:36,701 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T21:49:36,701 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T21:49:36,702 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T21:49:36,702 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T21:49:36,703 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T21:49:36,703 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T21:49:36,705 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T21:49:36,706 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T21:49:36,707 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T21:49:36,707 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T21:49:36,708 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T21:49:36,708 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T21:49:36,709 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T21:49:36,710 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T21:49:36,711 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T21:49:36,711 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T21:49:36,712 DEBUG 
[master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T21:49:36,714 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T21:49:36,715 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T21:49:36,716 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:49:36,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:49:36,716 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,717 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bb3046a53f79,46197,1733348976495, sessionid=0x100a735c3cb0000, setting cluster-up flag (Was=false) 2024-12-04T21:49:36,718 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,721 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T21:49:36,723 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,46197,1733348976495 2024-12-04T21:49:36,725 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:36,728 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T21:49:36,729 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,46197,1733348976495 2024-12-04T21:49:36,730 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T21:49:36,732 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T21:49:36,732 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T21:49:36,732 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T21:49:36,732 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bb3046a53f79,46197,1733348976495 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T21:49:36,734 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:49:36,734 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:49:36,734 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:49:36,734 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:49:36,734 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bb3046a53f79:0, corePoolSize=10, maxPoolSize=10 2024-12-04T21:49:36,734 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,734 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:49:36,734 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T21:49:36,736 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:49:36,736 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T21:49:36,737 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733349006737 2024-12-04T21:49:36,737 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T21:49:36,737 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:36,737 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T21:49:36,738 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T21:49:36,738 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T21:49:36,738 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T21:49:36,738 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T21:49:36,737 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T21:49:36,738 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,738 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T21:49:36,738 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T21:49:36,739 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T21:49:36,739 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T21:49:36,739 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T21:49:36,739 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733348976739,5,FailOnTimeoutGroup] 2024-12-04T21:49:36,739 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733348976739,5,FailOnTimeoutGroup] 2024-12-04T21:49:36,739 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,739 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T21:49:36,740 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,740 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
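The cleaner chores scheduled above (LogsCleaner and HFileCleaner at a 600000 ms period, plus their delegate cleaners) are driven by master-side configuration. The sketch below names the usual properties; the key names are believed to be the standard hbase.master.cleaner.* / *cleaner.plugins settings and the values simply echo the log, so verify against your HBase version before relying on them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChoreConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The 600000 ms period on LogsCleaner/HFileCleaner above is the default interval.
        conf.setInt("hbase.master.cleaner.interval", 600_000);
        // Delegate cleaners are chained, as the "Initialize cleaner=..." lines show.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
        System.out.println(conf.get("hbase.master.cleaner.interval"));
      }
    }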
2024-12-04T21:49:36,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:49:36,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:49:36,748 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T21:49:36,749 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305 2024-12-04T21:49:36,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:49:36,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:49:36,757 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:36,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:49:36,759 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:49:36,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:36,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:36,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:49:36,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:49:36,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:36,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:36,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:49:36,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:49:36,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:36,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:36,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:49:36,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:49:36,767 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:36,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:36,768 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:49:36,769 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740 2024-12-04T21:49:36,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740 2024-12-04T21:49:36,770 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(746): ClusterId : 7f8dd09b-d566-4e10-acfb-96bbdbdbc043 2024-12-04T21:49:36,770 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T21:49:36,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:49:36,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:49:36,772 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
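The FlushLargeStoresPolicy entry above falls back to memstore-flush-size divided by the number of families because the hbase:meta descriptor carries no explicit per-family lower bound. A sketch of setting that bound on a table descriptor, assuming the TableDescriptorBuilder client API; the table name and the 16 MB value are illustrative only:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBound {
        public static void main(String[] args) {
            // Hypothetical table; the property key matches the one reported missing above.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                // 16 MB lower bound before an individual column family is flushed on its own.
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
            System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }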
2024-12-04T21:49:36,772 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T21:49:36,773 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T21:49:36,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:49:36,775 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T21:49:36,775 DEBUG [RS:0;bb3046a53f79:46763 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63156891, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:49:36,777 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:49:36,778 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720149, jitterRate=-0.08428440988063812}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:49:36,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733348976757Initializing all the Stores at 1733348976758 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348976758Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348976758Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348976758Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348976758Cleaning up temporary data from old regions at 1733348976772 (+14 ms)Region opened successfully at 1733348976779 (+7 ms) 2024-12-04T21:49:36,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:49:36,779 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region 
hbase:meta,,1.1588230740 2024-12-04T21:49:36,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:49:36,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:49:36,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:49:36,779 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:49:36,780 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733348976779Disabling compacts and flushes for region at 1733348976779Disabling writes for close at 1733348976779Writing region close event to WAL at 1733348976779Closed at 1733348976779 2024-12-04T21:49:36,781 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:49:36,781 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T21:49:36,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T21:49:36,783 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:49:36,785 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T21:49:36,794 DEBUG [RS:0;bb3046a53f79:46763 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bb3046a53f79:46763 2024-12-04T21:49:36,794 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T21:49:36,794 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T21:49:36,794 DEBUG [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T21:49:36,795 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(2659): reportForDuty to master=bb3046a53f79,46197,1733348976495 with port=46763, startcode=1733348976543 2024-12-04T21:49:36,795 DEBUG [RS:0;bb3046a53f79:46763 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T21:49:36,798 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35705, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T21:49:36,798 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46197 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bb3046a53f79,46763,1733348976543 2024-12-04T21:49:36,799 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46197 {}] master.ServerManager(517): Registering regionserver=bb3046a53f79,46763,1733348976543 2024-12-04T21:49:36,801 DEBUG [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305 2024-12-04T21:49:36,801 DEBUG [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39903 2024-12-04T21:49:36,801 DEBUG [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T21:49:36,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:49:36,803 DEBUG [RS:0;bb3046a53f79:46763 {}] zookeeper.ZKUtil(111): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bb3046a53f79,46763,1733348976543 2024-12-04T21:49:36,803 WARN [RS:0;bb3046a53f79:46763 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T21:49:36,803 INFO [RS:0;bb3046a53f79:46763 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:49:36,804 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bb3046a53f79,46763,1733348976543] 2024-12-04T21:49:36,804 DEBUG [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/WALs/bb3046a53f79,46763,1733348976543 2024-12-04T21:49:36,809 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T21:49:36,812 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T21:49:36,812 INFO [RS:0;bb3046a53f79:46763 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T21:49:36,812 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
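PressureAwareCompactionThroughputController above prints a 100 MB/s upper and 50 MB/s lower compaction throughput bound with a 60000 ms tuning period. Those bounds are normally driven by the hbase.hstore.compaction.throughput.* keys; the exact key names below come from general HBase documentation rather than from this log, so treat the sketch as an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputBounds {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Values are bytes per second; these mirror the 100 MB/s and 50 MB/s printed above.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
        }
    }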
2024-12-04T21:49:36,813 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T21:49:36,814 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T21:49:36,814 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,814 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,815 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:36,815 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:49:36,815 DEBUG [RS:0;bb3046a53f79:46763 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:49:36,817 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-04T21:49:36,817 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,817 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,817 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,817 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,817 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46763,1733348976543-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:49:36,831 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T21:49:36,831 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46763,1733348976543-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,832 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,832 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.Replication(171): bb3046a53f79,46763,1733348976543 started 2024-12-04T21:49:36,843 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:36,843 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(1482): Serving as bb3046a53f79,46763,1733348976543, RpcServer on bb3046a53f79/172.17.0.2:46763, sessionid=0x100a735c3cb0001 2024-12-04T21:49:36,844 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T21:49:36,844 DEBUG [RS:0;bb3046a53f79:46763 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bb3046a53f79,46763,1733348976543 2024-12-04T21:49:36,844 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,46763,1733348976543' 2024-12-04T21:49:36,844 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T21:49:36,845 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T21:49:36,845 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T21:49:36,845 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T21:49:36,845 DEBUG [RS:0;bb3046a53f79:46763 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bb3046a53f79,46763,1733348976543 2024-12-04T21:49:36,845 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,46763,1733348976543' 2024-12-04T21:49:36,846 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T21:49:36,846 DEBUG 
[RS:0;bb3046a53f79:46763 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T21:49:36,847 DEBUG [RS:0;bb3046a53f79:46763 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T21:49:36,847 INFO [RS:0;bb3046a53f79:46763 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T21:49:36,847 INFO [RS:0;bb3046a53f79:46763 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T21:49:36,935 WARN [bb3046a53f79:46197 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T21:49:36,952 INFO [RS:0;bb3046a53f79:46763 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C46763%2C1733348976543, suffix=, logDir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/WALs/bb3046a53f79,46763,1733348976543, archiveDir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/oldWALs, maxLogs=32 2024-12-04T21:49:36,957 INFO [RS:0;bb3046a53f79:46763 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46763%2C1733348976543.1733348976956 2024-12-04T21:49:36,964 INFO [RS:0;bb3046a53f79:46763 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/WALs/bb3046a53f79,46763,1733348976543/bb3046a53f79%2C46763%2C1733348976543.1733348976956 2024-12-04T21:49:36,965 DEBUG [RS:0;bb3046a53f79:46763 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34805:34805),(127.0.0.1/127.0.0.1:42639:42639)] 2024-12-04T21:49:37,186 DEBUG [bb3046a53f79:46197 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T21:49:37,187 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bb3046a53f79,46763,1733348976543 2024-12-04T21:49:37,191 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,46763,1733348976543, state=OPENING 2024-12-04T21:49:37,194 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T21:49:37,197 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:37,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:37,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:49:37,198 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:49:37,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:49:37,198 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,46763,1733348976543}] 2024-12-04T21:49:37,353 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T21:49:37,356 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34943, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T21:49:37,364 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T21:49:37,364 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:49:37,368 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C46763%2C1733348976543.meta, suffix=.meta, logDir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/WALs/bb3046a53f79,46763,1733348976543, archiveDir=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/oldWALs, maxLogs=32 2024-12-04T21:49:37,369 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46763%2C1733348976543.meta.1733348977369.meta 2024-12-04T21:49:37,375 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/WALs/bb3046a53f79,46763,1733348976543/bb3046a53f79%2C46763%2C1733348976543.meta.1733348977369.meta 2024-12-04T21:49:37,376 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34805:34805),(127.0.0.1/127.0.0.1:42639:42639)] 2024-12-04T21:49:37,377 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:49:37,377 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T21:49:37,378 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T21:49:37,378 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
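The coprocessor specification logged for hbase:meta (class name, null path, priority 536870911) has the same shape a user table declaration would. A minimal sketch of attaching that endpoint to a table descriptor, assuming the TableDescriptorBuilder API; the table and family names are hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorOnTable {
        public static void main(String[] args) throws Exception {
            // Declares the same endpoint the meta region loads in the entries above;
            // "example_table" is an illustrative name, not taken from the log.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
            System.out.println(td.getCoprocessorDescriptors());
        }
    }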
2024-12-04T21:49:37,378 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T21:49:37,378 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:37,378 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T21:49:37,378 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T21:49:37,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:49:37,381 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:49:37,381 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:37,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:37,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:49:37,383 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:49:37,383 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:37,384 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:37,384 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:49:37,385 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:49:37,385 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:37,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:37,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:49:37,387 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:49:37,387 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:37,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
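The store attributes repeated for each meta column family above (three versions, ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks for 'info') map one-to-one onto the column family builder API. A sketch restating those logged values in code, assuming the standard ColumnFamilyDescriptorBuilder:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeInfoFamily {
        public static void main(String[] args) {
            // Mirrors the attributes printed for the meta 'info' family in the log above.
            ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build();
            System.out.println(cf);
        }
    }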
2024-12-04T21:49:37,388 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:49:37,389 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740 2024-12-04T21:49:37,390 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740 2024-12-04T21:49:37,392 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:49:37,392 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:49:37,392 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T21:49:37,394 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:49:37,395 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781622, jitterRate=-0.006117090582847595}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:49:37,395 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T21:49:37,396 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733348977378Writing region info on filesystem at 1733348977378Initializing all the Stores at 1733348977379 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348977380 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348977380Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348977380Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348977380Cleaning up temporary data from old regions at 1733348977392 (+12 ms)Running coprocessor post-open hooks at 1733348977395 (+3 ms)Region opened successfully at 1733348977396 (+1 ms) 2024-12-04T21:49:37,397 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733348977353 2024-12-04T21:49:37,401 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T21:49:37,401 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T21:49:37,402 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,46763,1733348976543 2024-12-04T21:49:37,403 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,46763,1733348976543, state=OPEN 2024-12-04T21:49:37,405 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:49:37,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:49:37,405 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bb3046a53f79,46763,1733348976543 2024-12-04T21:49:37,405 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:49:37,405 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:49:37,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T21:49:37,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,46763,1733348976543 in 207 msec 2024-12-04T21:49:37,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T21:49:37,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 627 msec 2024-12-04T21:49:37,413 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:49:37,413 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T21:49:37,415 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:49:37,415 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,46763,1733348976543, seqNum=-1] 2024-12-04T21:49:37,415 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:49:37,416 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58545, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:49:37,424 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 691 msec 2024-12-04T21:49:37,424 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733348977424, completionTime=-1 2024-12-04T21:49:37,425 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T21:49:37,425 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T21:49:37,427 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T21:49:37,427 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733349037427 2024-12-04T21:49:37,427 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733349097427 2024-12-04T21:49:37,427 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-04T21:49:37,428 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46197,1733348976495-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:37,428 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46197,1733348976495-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:37,428 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46197,1733348976495-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:37,428 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bb3046a53f79:46197, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:49:37,428 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:37,429 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:37,431 DEBUG [master/bb3046a53f79:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T21:49:37,434 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.849sec 2024-12-04T21:49:37,434 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T21:49:37,435 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T21:49:37,435 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T21:49:37,435 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T21:49:37,435 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T21:49:37,435 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46197,1733348976495-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:49:37,435 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46197,1733348976495-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T21:49:37,439 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T21:49:37,439 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T21:49:37,439 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46197,1733348976495-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
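MasterQuotaManager above reports quota support disabled, which is the default for a test minicluster. If quota tracking were wanted, the usual switch is hbase.quota.enabled; that key name comes from HBase's quota documentation rather than from this log, so treat it as an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotas {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Turns on the master/region server quota managers the log reports as disabled.
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println(conf.getBoolean("hbase.quota.enabled", false));
        }
    }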
2024-12-04T21:49:37,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ea4d062, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:49:37,471 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bb3046a53f79,46197,-1 for getting cluster id 2024-12-04T21:49:37,472 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T21:49:37,475 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7f8dd09b-d566-4e10-acfb-96bbdbdbc043' 2024-12-04T21:49:37,477 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T21:49:37,477 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7f8dd09b-d566-4e10-acfb-96bbdbdbc043" 2024-12-04T21:49:37,479 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e0c936b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:49:37,479 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bb3046a53f79,46197,-1] 2024-12-04T21:49:37,480 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T21:49:37,481 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:37,482 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46408, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T21:49:37,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4de0d959, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:49:37,484 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:49:37,485 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,46763,1733348976543, seqNum=-1] 2024-12-04T21:49:37,485 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:49:37,487 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:49:37,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bb3046a53f79,46197,1733348976495 2024-12-04T21:49:37,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-04T21:49:37,492 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-12-04T21:49:37,493 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-04T21:49:37,493 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-04T21:49:37,493 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-04T21:49:37,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-04T21:49:37,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-04T21:49:37,493 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-04T21:49:37,493 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-04T21:49:37,493 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=355378974, stopped=false
2024-12-04T21:49:37,493 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bb3046a53f79,46197,1733348976495
2024-12-04T21:49:37,495 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-04T21:49:37,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-04T21:49:37,495 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-04T21:49:37,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-04T21:49:37,495 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-04T21:49:37,495 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-04T21:49:37,495 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:49:37,495 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:37,495 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:49:37,495 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:49:37,496 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bb3046a53f79,46763,1733348976543' ***** 2024-12-04T21:49:37,496 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T21:49:37,496 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T21:49:37,496 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T21:49:37,496 INFO [RS:0;bb3046a53f79:46763 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T21:49:37,496 INFO [RS:0;bb3046a53f79:46763 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T21:49:37,496 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(959): stopping server bb3046a53f79,46763,1733348976543 2024-12-04T21:49:37,496 INFO [RS:0;bb3046a53f79:46763 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:49:37,496 INFO [RS:0;bb3046a53f79:46763 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bb3046a53f79:46763. 2024-12-04T21:49:37,496 DEBUG [RS:0;bb3046a53f79:46763 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:49:37,497 DEBUG [RS:0;bb3046a53f79:46763 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:37,497 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
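[Editor's note] The three "Connection has been closed" stack traces above all funnel through HBaseTestingUtil.shutdownMiniCluster, reached from AbstractTestLogRolling.tearDown. A minimal sketch of a teardown shaped like that call path, assuming the conventional static TEST_UTIL field; the actual test source is not part of this log:

    // Sketch only: inferred from the stack traces in this log, not copied from
    // AbstractTestLogRolling. TEST_UTIL is an assumed field name.
    @After
    public void tearDown() throws Exception {
      // Closes the shared async connection (the "Connection has been closed by
      // Time-limited test" entries above), then stops HBase, HDFS and ZooKeeper.
      TEST_UTIL.shutdownMiniCluster();
    }
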
2024-12-04T21:49:37,497 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T21:49:37,497 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T21:49:37,497 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T21:49:37,497 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-04T21:49:37,497 DEBUG [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-04T21:49:37,497 DEBUG [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-04T21:49:37,497 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:49:37,498 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:49:37,498 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:49:37,498 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:49:37,498 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:49:37,498 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-04T21:49:37,514 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740/.tmp/ns/a55fab75c93941459bf4419254942478 is 43, key is default/ns:d/1733348977417/Put/seqid=0 2024-12-04T21:49:37,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741835_1011 (size=5153) 2024-12-04T21:49:37,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741835_1011 (size=5153) 2024-12-04T21:49:37,521 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740/.tmp/ns/a55fab75c93941459bf4419254942478 2024-12-04T21:49:37,529 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740/.tmp/ns/a55fab75c93941459bf4419254942478 as hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740/ns/a55fab75c93941459bf4419254942478 2024-12-04T21:49:37,537 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740/ns/a55fab75c93941459bf4419254942478, entries=2, sequenceid=6, filesize=5.0 K 2024-12-04T21:49:37,538 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false 2024-12-04T21:49:37,539 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T21:49:37,545 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-04T21:49:37,546 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:49:37,546 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:49:37,546 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733348977497Running coprocessor pre-close hooks at 1733348977497Disabling compacts and flushes for region at 1733348977497Disabling writes for close at 1733348977498 (+1 ms)Obtaining lock to block concurrent updates at 1733348977498Preparing flush snapshotting stores in 1588230740 at 1733348977498Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733348977498Flushing stores of hbase:meta,,1.1588230740 at 1733348977499 (+1 ms)Flushing 1588230740/ns: creating writer at 1733348977499Flushing 1588230740/ns: appending metadata at 1733348977513 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733348977513Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@781f6b86: reopening flushed file at 1733348977528 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false at 1733348977539 (+11 ms)Writing region close event to WAL at 1733348977540 (+1 ms)Running coprocessor post-close hooks at 1733348977546 (+6 ms)Closed at 1733348977546 2024-12-04T21:49:37,546 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T21:49:37,698 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(976): stopping server bb3046a53f79,46763,1733348976543; all regions closed. 
2024-12-04T21:49:37,698 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,699 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,699 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,699 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,699 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741834_1010 (size=1152) 2024-12-04T21:49:37,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741834_1010 (size=1152) 2024-12-04T21:49:37,705 DEBUG [RS:0;bb3046a53f79:46763 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/oldWALs 2024-12-04T21:49:37,705 INFO [RS:0;bb3046a53f79:46763 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C46763%2C1733348976543.meta:.meta(num 1733348977369) 2024-12-04T21:49:37,705 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,706 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,706 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,706 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,706 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741833_1009 (size=93) 2024-12-04T21:49:37,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741833_1009 (size=93) 2024-12-04T21:49:37,710 DEBUG [RS:0;bb3046a53f79:46763 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/oldWALs 2024-12-04T21:49:37,710 INFO [RS:0;bb3046a53f79:46763 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C46763%2C1733348976543:(num 1733348976956) 2024-12-04T21:49:37,711 DEBUG [RS:0;bb3046a53f79:46763 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:37,711 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:49:37,711 INFO [RS:0;bb3046a53f79:46763 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:49:37,711 INFO [RS:0;bb3046a53f79:46763 {}] hbase.ChoreService(370): Chore service for: regionserver/bb3046a53f79:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T21:49:37,711 INFO [RS:0;bb3046a53f79:46763 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:49:37,711 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T21:49:37,711 INFO [RS:0;bb3046a53f79:46763 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46763 2024-12-04T21:49:37,712 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bb3046a53f79,46763,1733348976543 2024-12-04T21:49:37,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:49:37,712 INFO [RS:0;bb3046a53f79:46763 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:49:37,713 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bb3046a53f79,46763,1733348976543] 2024-12-04T21:49:37,714 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bb3046a53f79,46763,1733348976543 already deleted, retry=false 2024-12-04T21:49:37,714 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bb3046a53f79,46763,1733348976543 expired; onlineServers=0 2024-12-04T21:49:37,714 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bb3046a53f79,46197,1733348976495' ***** 2024-12-04T21:49:37,714 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T21:49:37,714 INFO [M:0;bb3046a53f79:46197 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:49:37,714 INFO [M:0;bb3046a53f79:46197 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:49:37,714 DEBUG [M:0;bb3046a53f79:46197 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T21:49:37,715 DEBUG [M:0;bb3046a53f79:46197 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T21:49:37,715 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T21:49:37,715 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733348976739 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733348976739,5,FailOnTimeoutGroup] 2024-12-04T21:49:37,715 INFO [M:0;bb3046a53f79:46197 {}] hbase.ChoreService(370): Chore service for: master/bb3046a53f79:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T21:49:37,715 INFO [M:0;bb3046a53f79:46197 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:49:37,715 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733348976739 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733348976739,5,FailOnTimeoutGroup] 2024-12-04T21:49:37,715 DEBUG [M:0;bb3046a53f79:46197 {}] master.HMaster(1795): Stopping service threads 2024-12-04T21:49:37,715 INFO [M:0;bb3046a53f79:46197 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T21:49:37,715 INFO [M:0;bb3046a53f79:46197 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:49:37,715 INFO [M:0;bb3046a53f79:46197 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T21:49:37,715 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T21:49:37,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T21:49:37,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:37,717 DEBUG [M:0;bb3046a53f79:46197 {}] zookeeper.ZKUtil(347): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T21:49:37,717 WARN [M:0;bb3046a53f79:46197 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T21:49:37,717 INFO [M:0;bb3046a53f79:46197 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/.lastflushedseqids 2024-12-04T21:49:37,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741836_1012 (size=99) 2024-12-04T21:49:37,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741836_1012 (size=99) 2024-12-04T21:49:37,728 INFO [M:0;bb3046a53f79:46197 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T21:49:37,728 INFO [M:0;bb3046a53f79:46197 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T21:49:37,728 DEBUG [M:0;bb3046a53f79:46197 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:49:37,728 INFO [M:0;bb3046a53f79:46197 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:37,728 DEBUG [M:0;bb3046a53f79:46197 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:37,728 DEBUG [M:0;bb3046a53f79:46197 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:49:37,728 DEBUG [M:0;bb3046a53f79:46197 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:37,728 INFO [M:0;bb3046a53f79:46197 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-04T21:49:37,750 DEBUG [M:0;bb3046a53f79:46197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/48dcec03ae8d4cb093e50b9fe912d96c is 82, key is hbase:meta,,1/info:regioninfo/1733348977402/Put/seqid=0 2024-12-04T21:49:37,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741837_1013 (size=5672) 2024-12-04T21:49:37,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741837_1013 (size=5672) 2024-12-04T21:49:37,755 INFO [M:0;bb3046a53f79:46197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/48dcec03ae8d4cb093e50b9fe912d96c 2024-12-04T21:49:37,777 DEBUG [M:0;bb3046a53f79:46197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a517eb5d0ce840dab7e0cfa21b5d21eb is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733348977423/Put/seqid=0 2024-12-04T21:49:37,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741838_1014 (size=5275) 2024-12-04T21:49:37,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741838_1014 (size=5275) 2024-12-04T21:49:37,783 INFO [M:0;bb3046a53f79:46197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a517eb5d0ce840dab7e0cfa21b5d21eb 2024-12-04T21:49:37,803 DEBUG [M:0;bb3046a53f79:46197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fc2775e88725443bbb7ad58a0adf48b7 is 69, key is bb3046a53f79,46763,1733348976543/rs:state/1733348976799/Put/seqid=0 2024-12-04T21:49:37,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741839_1015 (size=5156) 2024-12-04T21:49:37,810 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741839_1015 (size=5156) 2024-12-04T21:49:37,810 INFO [M:0;bb3046a53f79:46197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fc2775e88725443bbb7ad58a0adf48b7 2024-12-04T21:49:37,814 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:49:37,814 INFO [RS:0;bb3046a53f79:46763 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:49:37,814 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46763-0x100a735c3cb0001, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:49:37,814 INFO [RS:0;bb3046a53f79:46763 {}] regionserver.HRegionServer(1031): Exiting; stopping=bb3046a53f79,46763,1733348976543; zookeeper connection closed. 2024-12-04T21:49:37,814 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68ab08be {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68ab08be 2024-12-04T21:49:37,814 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T21:49:37,831 DEBUG [M:0;bb3046a53f79:46197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c4d7e14885444b0a357b309f530fc4d is 52, key is load_balancer_on/state:d/1733348977491/Put/seqid=0 2024-12-04T21:49:37,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741840_1016 (size=5056) 2024-12-04T21:49:37,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741840_1016 (size=5056) 2024-12-04T21:49:37,838 INFO [M:0;bb3046a53f79:46197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c4d7e14885444b0a357b309f530fc4d 2024-12-04T21:49:37,845 DEBUG [M:0;bb3046a53f79:46197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/48dcec03ae8d4cb093e50b9fe912d96c as hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/48dcec03ae8d4cb093e50b9fe912d96c 2024-12-04T21:49:37,851 INFO [M:0;bb3046a53f79:46197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/48dcec03ae8d4cb093e50b9fe912d96c, entries=8, sequenceid=29, filesize=5.5 K 2024-12-04T21:49:37,852 DEBUG [M:0;bb3046a53f79:46197 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a517eb5d0ce840dab7e0cfa21b5d21eb as hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a517eb5d0ce840dab7e0cfa21b5d21eb 2024-12-04T21:49:37,858 INFO [M:0;bb3046a53f79:46197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a517eb5d0ce840dab7e0cfa21b5d21eb, entries=3, sequenceid=29, filesize=5.2 K 2024-12-04T21:49:37,860 DEBUG [M:0;bb3046a53f79:46197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fc2775e88725443bbb7ad58a0adf48b7 as hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fc2775e88725443bbb7ad58a0adf48b7 2024-12-04T21:49:37,866 INFO [M:0;bb3046a53f79:46197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fc2775e88725443bbb7ad58a0adf48b7, entries=1, sequenceid=29, filesize=5.0 K 2024-12-04T21:49:37,868 DEBUG [M:0;bb3046a53f79:46197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c4d7e14885444b0a357b309f530fc4d as hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c4d7e14885444b0a357b309f530fc4d 2024-12-04T21:49:37,874 INFO [M:0;bb3046a53f79:46197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39903/user/jenkins/test-data/dbc3e55f-13b1-cded-7c53-7dc561575305/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c4d7e14885444b0a357b309f530fc4d, entries=1, sequenceid=29, filesize=4.9 K 2024-12-04T21:49:37,876 INFO [M:0;bb3046a53f79:46197 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false 2024-12-04T21:49:37,877 INFO [M:0;bb3046a53f79:46197 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:37,878 DEBUG [M:0;bb3046a53f79:46197 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733348977728Disabling compacts and flushes for region at 1733348977728Disabling writes for close at 1733348977728Obtaining lock to block concurrent updates at 1733348977728Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733348977728Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733348977729 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733348977730 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733348977730Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733348977749 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733348977749Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733348977761 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733348977776 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733348977776Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733348977788 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733348977803 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733348977803Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733348977816 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733348977831 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733348977831Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bee615e: reopening flushed file at 1733348977844 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b08c4fa: reopening flushed file at 1733348977851 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@497aea42: reopening flushed file at 1733348977859 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5272717d: reopening flushed file at 1733348977867 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false at 1733348977876 (+9 ms)Writing region close event to WAL at 1733348977877 (+1 ms)Closed at 1733348977877 2024-12-04T21:49:37,878 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,878 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,878 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,879 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,879 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:37,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33261 is added to blk_1073741830_1006 (size=10311) 2024-12-04T21:49:37,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40335 is added to blk_1073741830_1006 (size=10311) 2024-12-04T21:49:37,882 INFO [M:0;bb3046a53f79:46197 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T21:49:37,882 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T21:49:37,882 INFO [M:0;bb3046a53f79:46197 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46197 2024-12-04T21:49:37,883 INFO [M:0;bb3046a53f79:46197 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:49:37,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:49:37,984 INFO [M:0;bb3046a53f79:46197 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:49:37,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46197-0x100a735c3cb0000, quorum=127.0.0.1:49844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:49:37,987 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@340f7bbb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:37,988 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@343123be{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:37,988 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:37,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a6deee4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:37,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e8eeadd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:37,989 WARN [BP-1659538331-172.17.0.2-1733348975890 heartbeating to localhost/127.0.0.1:39903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:49:37,989 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:49:37,989 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:49:37,989 WARN [BP-1659538331-172.17.0.2-1733348975890 heartbeating to localhost/127.0.0.1:39903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1659538331-172.17.0.2-1733348975890 (Datanode Uuid 5a41869d-d51c-439a-8fcf-46d30a6b7e9a) service to localhost/127.0.0.1:39903 2024-12-04T21:49:37,990 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/data/data3/current/BP-1659538331-172.17.0.2-1733348975890 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:37,990 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/data/data4/current/BP-1659538331-172.17.0.2-1733348975890 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:37,990 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:49:37,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25713898{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:37,992 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2723d3ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:37,992 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:37,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@290f57e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:37,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12a8548c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:37,994 WARN [BP-1659538331-172.17.0.2-1733348975890 heartbeating to localhost/127.0.0.1:39903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:49:37,994 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:49:37,994 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:49:37,994 WARN [BP-1659538331-172.17.0.2-1733348975890 heartbeating to localhost/127.0.0.1:39903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1659538331-172.17.0.2-1733348975890 (Datanode Uuid 62729c45-34fd-48d1-b75f-284e658877ee) service to localhost/127.0.0.1:39903 2024-12-04T21:49:37,995 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/data/data1/current/BP-1659538331-172.17.0.2-1733348975890 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:37,995 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/cluster_91c8eb82-6559-da37-7eee-2776ace9824e/data/data2/current/BP-1659538331-172.17.0.2-1733348975890 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:37,995 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:49:38,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@495bd725{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:49:38,001 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33c6588a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:38,001 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:38,001 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@698644f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:38,001 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15baacc3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:38,006 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.log.dir so I do NOT create it in target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a 2024-12-04T21:49:38,022 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7ee911ad-b455-18a5-4622-3f68556a01d4/hadoop.tmp.dir so I do NOT create it in target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48, deleteOnExit=true 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/test.cache.data in system properties and HBase conf 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir in system properties and HBase conf 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T21:49:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T21:49:38,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T21:49:38,023 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T21:49:38,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:49:38,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:49:38,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T21:49:38,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:49:38,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T21:49:38,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T21:49:38,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:49:38,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:49:38,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T21:49:38,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/nfs.dump.dir in system properties and HBase conf 2024-12-04T21:49:38,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/java.io.tmpdir in system properties and HBase conf 2024-12-04T21:49:38,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:49:38,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T21:49:38,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T21:49:38,036 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:49:38,084 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:38,089 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:49:38,094 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:49:38,094 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:49:38,095 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:49:38,095 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:38,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fe50a4a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:49:38,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@498b2e6c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:49:38,187 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b27d212{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/java.io.tmpdir/jetty-localhost-33829-hadoop-hdfs-3_4_1-tests_jar-_-any-4057539562091074528/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:49:38,188 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@92a3852{HTTP/1.1, (http/1.1)}{localhost:33829} 2024-12-04T21:49:38,188 INFO [Time-limited test {}] server.Server(415): Started @99118ms 2024-12-04T21:49:38,199 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:49:38,247 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:38,251 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:49:38,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:49:38,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:49:38,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:49:38,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@430ff8b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:49:38,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f7c849b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:49:38,342 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4317eac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/java.io.tmpdir/jetty-localhost-41057-hadoop-hdfs-3_4_1-tests_jar-_-any-12870142338749501696/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:38,343 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@499897b6{HTTP/1.1, (http/1.1)}{localhost:41057} 2024-12-04T21:49:38,343 INFO [Time-limited test {}] server.Server(415): Started @99273ms 2024-12-04T21:49:38,344 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:49:38,373 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:38,378 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:49:38,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:49:38,379 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:49:38,379 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:49:38,379 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27772cb6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:49:38,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46fa755{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:49:38,403 WARN [Thread-649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data1/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:38,403 WARN [Thread-650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data2/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:38,418 WARN [Thread-628 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:49:38,420 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64af55957b01e83b with lease ID 0x24418da8b59e6ae2: Processing first storage report for DS-d257fea5-30b5-4cb6-9919-c89aef35eb43 from datanode DatanodeRegistration(127.0.0.1:39293, datanodeUuid=4ea1e97a-75b8-4db3-a557-fb6227b680da, infoPort=35275, infoSecurePort=0, ipcPort=46515, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:38,420 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64af55957b01e83b with lease ID 0x24418da8b59e6ae2: from storage DS-d257fea5-30b5-4cb6-9919-c89aef35eb43 node DatanodeRegistration(127.0.0.1:39293, datanodeUuid=4ea1e97a-75b8-4db3-a557-fb6227b680da, infoPort=35275, infoSecurePort=0, ipcPort=46515, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:38,420 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64af55957b01e83b with lease ID 0x24418da8b59e6ae2: Processing first storage report for DS-ead3751f-1555-466c-9636-9fc1723b3e1a from datanode DatanodeRegistration(127.0.0.1:39293, datanodeUuid=4ea1e97a-75b8-4db3-a557-fb6227b680da, infoPort=35275, infoSecurePort=0, ipcPort=46515, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:38,420 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64af55957b01e83b with lease ID 0x24418da8b59e6ae2: from storage DS-ead3751f-1555-466c-9636-9fc1723b3e1a node DatanodeRegistration(127.0.0.1:39293, datanodeUuid=4ea1e97a-75b8-4db3-a557-fb6227b680da, infoPort=35275, infoSecurePort=0, ipcPort=46515, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:38,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21fae364{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/java.io.tmpdir/jetty-localhost-32891-hadoop-hdfs-3_4_1-tests_jar-_-any-5630862864812185143/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:38,488 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1cbabe3e{HTTP/1.1, (http/1.1)}{localhost:32891} 2024-12-04T21:49:38,488 INFO [Time-limited test {}] server.Server(415): Started @99418ms 2024-12-04T21:49:38,490 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
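Editor's aside: the entries above show HBaseTestingUtil pointing the dfs.*, fs.s3a.* and tmpdir paths into the per-run test-data directory and then bringing up an in-process HDFS (NameNode and DataNode Jetty servers, first block reports from the two data volumes). As an orientation only, a minimal sketch of how such an embedded cluster is typically started from a test, assuming the usual HBaseTestingUtil entry points; the handler-count property value is illustrative, not taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

// Minimal sketch, assuming the usual HBaseTestingUtil entry points; not the
// exact setup used by this flaky-test run.
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    // The utility rewrites hadoop.tmp.dir, dfs.* and fs.s3a.* paths to live
    // under its per-run test-data directory, as recorded in the log above.
    conf.setInt("hbase.regionserver.handler.count", 3); // illustrative value
    util.startMiniCluster();      // mini HDFS + mini ZooKeeper + master + region server
    try {
      // ... exercise the cluster through util.getConnection() ...
    } finally {
      util.shutdownMiniCluster(); // tears down HBase, ZooKeeper and HDFS
    }
  }
}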
2024-12-04T21:49:38,561 WARN [Thread-675 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data3/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:38,561 WARN [Thread-676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data4/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:38,580 WARN [Thread-664 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:49:38,582 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x550f35c362e3f12c with lease ID 0x24418da8b59e6ae3: Processing first storage report for DS-355869a6-3520-426c-9c30-2d5062b9d3c1 from datanode DatanodeRegistration(127.0.0.1:40897, datanodeUuid=d1e53198-d7bc-4599-888e-c6fe62466d4c, infoPort=41327, infoSecurePort=0, ipcPort=39029, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:38,583 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x550f35c362e3f12c with lease ID 0x24418da8b59e6ae3: from storage DS-355869a6-3520-426c-9c30-2d5062b9d3c1 node DatanodeRegistration(127.0.0.1:40897, datanodeUuid=d1e53198-d7bc-4599-888e-c6fe62466d4c, infoPort=41327, infoSecurePort=0, ipcPort=39029, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:38,583 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x550f35c362e3f12c with lease ID 0x24418da8b59e6ae3: Processing first storage report for DS-11239e35-2846-4a20-a4e6-f3085aa93be7 from datanode DatanodeRegistration(127.0.0.1:40897, datanodeUuid=d1e53198-d7bc-4599-888e-c6fe62466d4c, infoPort=41327, infoSecurePort=0, ipcPort=39029, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:38,583 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x550f35c362e3f12c with lease ID 0x24418da8b59e6ae3: from storage DS-11239e35-2846-4a20-a4e6-f3085aa93be7 node DatanodeRegistration(127.0.0.1:40897, datanodeUuid=d1e53198-d7bc-4599-888e-c6fe62466d4c, infoPort=41327, infoSecurePort=0, ipcPort=39029, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T21:49:38,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a 2024-12-04T21:49:38,624 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/zookeeper_0, clientPort=51562, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T21:49:38,625 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51562 2024-12-04T21:49:38,626 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:38,628 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:38,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39293 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:49:38,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40897 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:49:38,641 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a with version=8 2024-12-04T21:49:38,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/hbase-staging 2024-12-04T21:49:38,643 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:49:38,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:38,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:38,643 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:49:38,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:38,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:49:38,644 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T21:49:38,644 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:49:38,644 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38967 2024-12-04T21:49:38,646 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38967 connecting to ZooKeeper ensemble=127.0.0.1:51562 2024-12-04T21:49:38,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389670x0, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:49:38,650 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38967-0x100a735cc2d0000 connected 2024-12-04T21:49:38,666 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:38,668 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:38,670 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:49:38,670 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a, hbase.cluster.distributed=false 2024-12-04T21:49:38,672 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:49:38,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38967 2024-12-04T21:49:38,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38967 2024-12-04T21:49:38,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38967 2024-12-04T21:49:38,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38967 2024-12-04T21:49:38,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38967 2024-12-04T21:49:38,686 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:49:38,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:38,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:38,687 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:49:38,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:38,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:49:38,687 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T21:49:38,687 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:49:38,688 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39097 2024-12-04T21:49:38,689 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39097 connecting to ZooKeeper ensemble=127.0.0.1:51562 2024-12-04T21:49:38,689 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:38,691 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:38,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390970x0, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:49:38,695 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39097-0x100a735cc2d0001 connected 2024-12-04T21:49:38,695 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:49:38,695 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T21:49:38,696 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T21:49:38,697 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T21:49:38,698 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:49:38,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39097 2024-12-04T21:49:38,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39097 2024-12-04T21:49:38,699 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39097 2024-12-04T21:49:38,699 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39097 2024-12-04T21:49:38,699 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39097 
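Editor's aside: the ZKUtil entries above ("Set watcher on znode that does not yet exist, /hbase/running", "/hbase/acl", "/hbase/master") use the standard ZooKeeper pattern of registering a watch via exists() so the process is notified when the node is later created. A minimal sketch of that pattern with the plain ZooKeeper client API; the ensemble address is the one in the log, everything else is illustrative, and HBase's own ZKWatcher/ZKUtil wrappers are not reproduced here.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// "Set watcher on a znode that does not yet exist": exists() registers the
// watch even when it returns null, so a later create of /hbase/running fires
// a NodeCreated event on this client.
public class WatchMissingZnode {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51562", 30_000, event -> {});
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());
    if (zk.exists("/hbase/running", watcher) == null) {
      System.out.println("/hbase/running absent; NodeCreated watch is registered");
    }
    // In a real test the session would be kept open long enough to observe the event.
  }
}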
2024-12-04T21:49:38,717 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bb3046a53f79:38967 2024-12-04T21:49:38,717 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bb3046a53f79,38967,1733348978643 2024-12-04T21:49:38,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:49:38,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:49:38,719 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bb3046a53f79,38967,1733348978643 2024-12-04T21:49:38,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:38,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T21:49:38,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:38,721 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T21:49:38,721 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bb3046a53f79,38967,1733348978643 from backup master directory 2024-12-04T21:49:38,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:49:38,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bb3046a53f79,38967,1733348978643 2024-12-04T21:49:38,722 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T21:49:38,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:49:38,722 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bb3046a53f79,38967,1733348978643 2024-12-04T21:49:38,727 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/hbase.id] with ID: 7e5d7749-a411-4d4f-a37b-2d53840dea68 2024-12-04T21:49:38,727 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/.tmp/hbase.id 2024-12-04T21:49:38,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39293 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:49:38,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40897 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:49:38,736 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/.tmp/hbase.id]:[hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/hbase.id] 2024-12-04T21:49:38,750 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:38,750 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T21:49:38,751 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
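Editor's aside: the FSUtils entries above write the cluster ID file to a .tmp location and then move it to its final path, the usual write-then-rename idiom for an atomic publish on HDFS. A minimal sketch of that idiom with the Hadoop FileSystem API; the paths and UUID are illustrative, and HBase's actual FSUtils helpers are not reproduced.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Write-then-rename sketch: readers either see no id file or a complete one,
// never a half-written file.
public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/hbase/.tmp/hbase.id");        // illustrative paths
    Path dst = new Path("/hbase/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {                          // publish atomically
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}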
2024-12-04T21:49:38,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:38,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:38,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39293 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:49:38,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40897 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:49:38,762 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:49:38,763 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T21:49:38,763 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:49:38,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39293 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:49:38,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40897 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:49:38,773 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store 2024-12-04T21:49:38,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40897 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:49:38,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39293 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:49:38,818 INFO [regionserver/bb3046a53f79:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:49:39,183 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:39,183 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:49:39,183 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:39,184 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:39,184 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:49:39,184 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:49:39,184 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
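Editor's aside: the two long descriptor dumps above list the column families of the local master:store region (info, proc, rs, state) with their per-family settings (VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY, BLOCKSIZE). For orientation, a minimal sketch of assembling a descriptor with one such family through the public builder API; the table name is hypothetical and this is not how MasterRegion itself builds its descriptor.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Builder sketch mirroring the 'info' family settings printed in the log
// (VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks).
public class DescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example:store")) // hypothetical table name
        .setColumnFamily(info)
        .build();
    System.out.println(td);
  }
}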
2024-12-04T21:49:39,184 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733348979183Disabling compacts and flushes for region at 1733348979183Disabling writes for close at 1733348979184 (+1 ms)Writing region close event to WAL at 1733348979184Closed at 1733348979184 2024-12-04T21:49:39,187 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/.initializing 2024-12-04T21:49:39,187 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643 2024-12-04T21:49:39,193 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C38967%2C1733348978643, suffix=, logDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643, archiveDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/oldWALs, maxLogs=10 2024-12-04T21:49:39,194 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C38967%2C1733348978643.1733348979193 2024-12-04T21:49:39,200 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 2024-12-04T21:49:39,201 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35275:35275),(127.0.0.1/127.0.0.1:41327:41327)] 2024-12-04T21:49:39,202 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:49:39,202 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:39,202 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,202 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,206 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T21:49:39,206 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,206 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:39,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,208 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T21:49:39,208 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:49:39,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,211 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T21:49:39,211 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:49:39,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,214 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T21:49:39,214 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:49:39,215 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,216 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,217 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,219 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,219 DEBUG [master/bb3046a53f79:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,220 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T21:49:39,222 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:49:39,225 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:49:39,226 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810537, jitterRate=0.030651196837425232}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T21:49:39,227 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733348979202Initializing all the Stores at 1733348979203 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348979203Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348979204 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348979204Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348979204Cleaning up temporary data from old regions at 1733348979219 (+15 ms)Region opened successfully at 1733348979227 (+8 ms) 2024-12-04T21:49:39,227 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T21:49:39,231 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fed814e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:49:39,232 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T21:49:39,232 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T21:49:39,232 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T21:49:39,232 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T21:49:39,233 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T21:49:39,233 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T21:49:39,233 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T21:49:39,235 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T21:49:39,236 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T21:49:39,237 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T21:49:39,237 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T21:49:39,238 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T21:49:39,239 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T21:49:39,239 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T21:49:39,240 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T21:49:39,241 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T21:49:39,242 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T21:49:39,243 DEBUG 
[master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T21:49:39,245 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T21:49:39,246 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T21:49:39,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:49:39,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:49:39,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:39,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:39,247 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bb3046a53f79,38967,1733348978643, sessionid=0x100a735cc2d0000, setting cluster-up flag (Was=false) 2024-12-04T21:49:39,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:39,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:39,253 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T21:49:39,254 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,38967,1733348978643 2024-12-04T21:49:39,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:39,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:39,260 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T21:49:39,261 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,38967,1733348978643 2024-12-04T21:49:39,262 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T21:49:39,264 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T21:49:39,264 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T21:49:39,264 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T21:49:39,264 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bb3046a53f79,38967,1733348978643 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T21:49:39,266 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:49:39,266 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:49:39,266 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:49:39,266 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:49:39,266 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bb3046a53f79:0, corePoolSize=10, maxPoolSize=10 2024-12-04T21:49:39,266 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,266 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:49:39,266 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T21:49:39,267 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733349009267 2024-12-04T21:49:39,268 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T21:49:39,268 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T21:49:39,268 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T21:49:39,268 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T21:49:39,268 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T21:49:39,268 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T21:49:39,268 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,268 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:49:39,268 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T21:49:39,268 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T21:49:39,268 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T21:49:39,269 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T21:49:39,269 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T21:49:39,269 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T21:49:39,269 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,269 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T21:49:39,270 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733348979269,5,FailOnTimeoutGroup] 2024-12-04T21:49:39,270 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733348979270,5,FailOnTimeoutGroup] 2024-12-04T21:49:39,270 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,270 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T21:49:39,270 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,271 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-04T21:49:39,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39293 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:49:39,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40897 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:49:39,277 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T21:49:39,278 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a 2024-12-04T21:49:39,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39293 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:49:39,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40897 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:49:39,285 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:39,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:49:39,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:49:39,287 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:39,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:49:39,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:49:39,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:39,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:49:39,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:49:39,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:39,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:49:39,294 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:49:39,294 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,295 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:39,295 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:49:39,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740 2024-12-04T21:49:39,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740 2024-12-04T21:49:39,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:49:39,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:49:39,298 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-04T21:49:39,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:49:39,302 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(746): ClusterId : 7e5d7749-a411-4d4f-a37b-2d53840dea68 2024-12-04T21:49:39,302 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:49:39,302 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T21:49:39,302 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=814686, jitterRate=0.03592798113822937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:49:39,303 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T21:49:39,303 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T21:49:39,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733348979285Initializing all the Stores at 1733348979286 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348979286Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348979286Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348979286Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348979286Cleaning up temporary data from old regions at 1733348979298 (+12 ms)Region opened successfully at 1733348979304 (+6 ms) 2024-12-04T21:49:39,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:49:39,304 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:49:39,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:49:39,304 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:49:39,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:49:39,305 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:49:39,305 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733348979304Disabling compacts and flushes for region at 1733348979304Disabling writes for close at 1733348979304Writing region close event to WAL at 1733348979304Closed at 1733348979304 2024-12-04T21:49:39,305 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T21:49:39,305 DEBUG [RS:0;bb3046a53f79:39097 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a59cd39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:49:39,306 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:49:39,306 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T21:49:39,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T21:49:39,308 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:49:39,309 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T21:49:39,318 DEBUG [RS:0;bb3046a53f79:39097 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bb3046a53f79:39097 2024-12-04T21:49:39,318 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T21:49:39,318 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T21:49:39,318 DEBUG [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T21:49:39,319 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(2659): reportForDuty to master=bb3046a53f79,38967,1733348978643 with port=39097, startcode=1733348978686 2024-12-04T21:49:39,319 DEBUG [RS:0;bb3046a53f79:39097 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T21:49:39,321 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56069, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T21:49:39,322 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38967 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bb3046a53f79,39097,1733348978686 2024-12-04T21:49:39,322 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38967 {}] master.ServerManager(517): Registering regionserver=bb3046a53f79,39097,1733348978686 2024-12-04T21:49:39,324 DEBUG [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a 2024-12-04T21:49:39,324 DEBUG [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38693 2024-12-04T21:49:39,324 DEBUG [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T21:49:39,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:49:39,326 DEBUG [RS:0;bb3046a53f79:39097 {}] zookeeper.ZKUtil(111): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bb3046a53f79,39097,1733348978686 2024-12-04T21:49:39,326 WARN [RS:0;bb3046a53f79:39097 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T21:49:39,326 INFO [RS:0;bb3046a53f79:39097 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:49:39,326 DEBUG [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686 2024-12-04T21:49:39,327 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bb3046a53f79,39097,1733348978686] 2024-12-04T21:49:39,333 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T21:49:39,335 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T21:49:39,335 INFO [RS:0;bb3046a53f79:39097 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T21:49:39,335 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-04T21:49:39,336 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T21:49:39,336 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T21:49:39,337 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,337 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,337 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,337 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,337 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,337 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,337 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:49:39,337 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,337 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,337 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,338 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,338 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,338 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:39,338 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:49:39,338 DEBUG [RS:0;bb3046a53f79:39097 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:49:39,338 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-04T21:49:39,338 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,338 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,338 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,338 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,338 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,39097,1733348978686-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:49:39,352 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T21:49:39,352 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,39097,1733348978686-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,352 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,352 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.Replication(171): bb3046a53f79,39097,1733348978686 started 2024-12-04T21:49:39,366 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,366 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(1482): Serving as bb3046a53f79,39097,1733348978686, RpcServer on bb3046a53f79/172.17.0.2:39097, sessionid=0x100a735cc2d0001 2024-12-04T21:49:39,366 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T21:49:39,366 DEBUG [RS:0;bb3046a53f79:39097 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bb3046a53f79,39097,1733348978686 2024-12-04T21:49:39,366 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,39097,1733348978686' 2024-12-04T21:49:39,366 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T21:49:39,367 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T21:49:39,368 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T21:49:39,368 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T21:49:39,368 DEBUG [RS:0;bb3046a53f79:39097 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bb3046a53f79,39097,1733348978686 2024-12-04T21:49:39,368 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,39097,1733348978686' 2024-12-04T21:49:39,368 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T21:49:39,368 DEBUG 
[RS:0;bb3046a53f79:39097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T21:49:39,369 DEBUG [RS:0;bb3046a53f79:39097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T21:49:39,369 INFO [RS:0;bb3046a53f79:39097 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T21:49:39,369 INFO [RS:0;bb3046a53f79:39097 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T21:49:39,460 WARN [bb3046a53f79:38967 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T21:49:39,472 INFO [RS:0;bb3046a53f79:39097 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C39097%2C1733348978686, suffix=, logDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686, archiveDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs, maxLogs=32 2024-12-04T21:49:39,474 INFO [RS:0;bb3046a53f79:39097 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C39097%2C1733348978686.1733348979474 2024-12-04T21:49:39,484 INFO [RS:0;bb3046a53f79:39097 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 2024-12-04T21:49:39,488 DEBUG [RS:0;bb3046a53f79:39097 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35275:35275),(127.0.0.1/127.0.0.1:41327:41327)] 2024-12-04T21:49:39,710 DEBUG [bb3046a53f79:38967 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T21:49:39,712 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bb3046a53f79,39097,1733348978686 2024-12-04T21:49:39,716 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,39097,1733348978686, state=OPENING 2024-12-04T21:49:39,718 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T21:49:39,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:39,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:49:39,721 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:49:39,721 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:49:39,721 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:49:39,721 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,39097,1733348978686}] 2024-12-04T21:49:39,878 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T21:49:39,884 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58619, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T21:49:39,890 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T21:49:39,890 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:49:39,893 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C39097%2C1733348978686.meta, suffix=.meta, logDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686, archiveDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs, maxLogs=32 2024-12-04T21:49:39,894 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta 2024-12-04T21:49:39,900 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta 2024-12-04T21:49:39,901 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41327:41327),(127.0.0.1/127.0.0.1:35275:35275)] 2024-12-04T21:49:39,902 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:49:39,903 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T21:49:39,903 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T21:49:39,903 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-04T21:49:39,903 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T21:49:39,903 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:39,903 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T21:49:39,903 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T21:49:39,905 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:49:39,906 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:49:39,906 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:39,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:49:39,907 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:49:39,907 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:39,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:49:39,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:49:39,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:49:39,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:49:39,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:49:39,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:39,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-04T21:49:39,911 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:49:39,912 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740 2024-12-04T21:49:39,913 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740 2024-12-04T21:49:39,914 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:49:39,914 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:49:39,915 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T21:49:39,916 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:49:39,917 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745860, jitterRate=-0.051590725779533386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:49:39,917 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T21:49:39,918 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733348979903Writing region info on filesystem at 1733348979903Initializing all the Stores at 1733348979904 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348979904Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348979905 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348979905Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733348979905Cleaning up temporary data from old regions at 1733348979914 (+9 ms)Running coprocessor post-open hooks at 1733348979917 (+3 ms)Region opened successfully at 1733348979918 (+1 ms) 2024-12-04T21:49:39,919 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733348979877 2024-12-04T21:49:39,921 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T21:49:39,921 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T21:49:39,922 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,39097,1733348978686 2024-12-04T21:49:39,923 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,39097,1733348978686, state=OPEN 2024-12-04T21:49:39,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:49:39,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:49:39,925 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bb3046a53f79,39097,1733348978686 2024-12-04T21:49:39,925 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:49:39,925 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:49:39,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T21:49:39,929 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,39097,1733348978686 in 204 msec 2024-12-04T21:49:39,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T21:49:39,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 623 msec 2024-12-04T21:49:39,933 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:49:39,933 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T21:49:39,934 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:49:39,934 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,39097,1733348978686, seqNum=-1] 2024-12-04T21:49:39,934 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:49:39,935 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56253, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:49:39,941 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 677 msec 2024-12-04T21:49:39,941 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733348979941, completionTime=-1 2024-12-04T21:49:39,941 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T21:49:39,941 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T21:49:39,943 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T21:49:39,943 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733349039943 2024-12-04T21:49:39,943 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733349099943 2024-12-04T21:49:39,943 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-04T21:49:39,944 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38967,1733348978643-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,944 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38967,1733348978643-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,944 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38967,1733348978643-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,944 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bb3046a53f79:38967, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:49:39,944 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,944 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:39,946 DEBUG [master/bb3046a53f79:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T21:49:39,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.226sec 2024-12-04T21:49:39,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T21:49:39,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T21:49:39,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T21:49:39,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T21:49:39,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T21:49:39,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38967,1733348978643-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:49:39,949 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38967,1733348978643-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T21:49:39,951 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T21:49:39,951 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T21:49:39,951 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,38967,1733348978643-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:49:40,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d4791b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:49:40,003 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bb3046a53f79,38967,-1 for getting cluster id 2024-12-04T21:49:40,003 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T21:49:40,007 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7e5d7749-a411-4d4f-a37b-2d53840dea68' 2024-12-04T21:49:40,008 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T21:49:40,008 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7e5d7749-a411-4d4f-a37b-2d53840dea68" 2024-12-04T21:49:40,008 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b8d6bd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:49:40,009 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bb3046a53f79,38967,-1] 2024-12-04T21:49:40,009 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T21:49:40,009 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:49:40,011 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43792, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T21:49:40,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d873672, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:49:40,012 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:49:40,013 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,39097,1733348978686, seqNum=-1] 2024-12-04T21:49:40,014 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:49:40,015 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52988, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:49:40,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bb3046a53f79,38967,1733348978643 2024-12-04T21:49:40,017 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:40,021 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T21:49:40,042 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:49:40,042 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:40,042 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:40,042 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:49:40,042 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:49:40,042 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:49:40,043 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T21:49:40,043 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:49:40,063 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42727 2024-12-04T21:49:40,064 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42727 connecting to ZooKeeper ensemble=127.0.0.1:51562 2024-12-04T21:49:40,065 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:40,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:49:40,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427270x0, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:49:40,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:427270x0, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-04T21:49:40,070 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42727-0x100a735cc2d0002 connected 2024-12-04T21:49:40,070 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-04T21:49:40,071 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T21:49:40,072 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T21:49:40,072 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42727-0x100a735cc2d0002, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T21:49:40,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42727-0x100a735cc2d0002, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:49:40,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42727 2024-12-04T21:49:40,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42727 2024-12-04T21:49:40,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42727 2024-12-04T21:49:40,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42727 2024-12-04T21:49:40,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42727 2024-12-04T21:49:40,078 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(746): ClusterId : 7e5d7749-a411-4d4f-a37b-2d53840dea68 2024-12-04T21:49:40,078 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T21:49:40,080 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T21:49:40,080 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T21:49:40,081 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T21:49:40,082 DEBUG [RS:1;bb3046a53f79:42727 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6205cbc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:49:40,093 DEBUG [RS:1;bb3046a53f79:42727 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;bb3046a53f79:42727 2024-12-04T21:49:40,093 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T21:49:40,093 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T21:49:40,093 DEBUG [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T21:49:40,094 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(2659): reportForDuty to master=bb3046a53f79,38967,1733348978643 with port=42727, startcode=1733348980042 2024-12-04T21:49:40,094 DEBUG [RS:1;bb3046a53f79:42727 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T21:49:40,095 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52493, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T21:49:40,096 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38967 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bb3046a53f79,42727,1733348980042 2024-12-04T21:49:40,096 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38967 {}] master.ServerManager(517): Registering regionserver=bb3046a53f79,42727,1733348980042 2024-12-04T21:49:40,098 DEBUG [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a 2024-12-04T21:49:40,098 DEBUG [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38693 2024-12-04T21:49:40,098 DEBUG [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T21:49:40,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:49:40,099 DEBUG [RS:1;bb3046a53f79:42727 {}] zookeeper.ZKUtil(111): regionserver:42727-0x100a735cc2d0002, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bb3046a53f79,42727,1733348980042 2024-12-04T21:49:40,099 WARN [RS:1;bb3046a53f79:42727 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T21:49:40,099 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bb3046a53f79,42727,1733348980042] 2024-12-04T21:49:40,099 INFO [RS:1;bb3046a53f79:42727 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:49:40,100 DEBUG [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042 2024-12-04T21:49:40,103 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T21:49:40,105 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T21:49:40,105 INFO [RS:1;bb3046a53f79:42727 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T21:49:40,105 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-04T21:49:40,109 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T21:49:40,110 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T21:49:40,110 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:49:40,110 DEBUG [RS:1;bb3046a53f79:42727 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:49:40,111 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-04T21:49:40,111 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:40,111 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:40,111 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:40,111 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:40,111 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,42727,1733348980042-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:49:40,125 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T21:49:40,125 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,42727,1733348980042-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:40,125 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:40,125 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.Replication(171): bb3046a53f79,42727,1733348980042 started 2024-12-04T21:49:40,135 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:49:40,135 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(1482): Serving as bb3046a53f79,42727,1733348980042, RpcServer on bb3046a53f79/172.17.0.2:42727, sessionid=0x100a735cc2d0002 2024-12-04T21:49:40,136 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T21:49:40,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;bb3046a53f79:42727,5,FailOnTimeoutGroup] 2024-12-04T21:49:40,136 DEBUG [RS:1;bb3046a53f79:42727 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bb3046a53f79,42727,1733348980042 2024-12-04T21:49:40,136 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,42727,1733348980042' 2024-12-04T21:49:40,136 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T21:49:40,136 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-04T21:49:40,136 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T21:49:40,136 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T21:49:40,137 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T21:49:40,137 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T21:49:40,137 DEBUG [RS:1;bb3046a53f79:42727 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
bb3046a53f79,42727,1733348980042 2024-12-04T21:49:40,137 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,42727,1733348980042' 2024-12-04T21:49:40,137 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T21:49:40,137 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T21:49:40,137 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is bb3046a53f79,38967,1733348978643 2024-12-04T21:49:40,137 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@30df01e2 2024-12-04T21:49:40,138 DEBUG [RS:1;bb3046a53f79:42727 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T21:49:40,138 INFO [RS:1;bb3046a53f79:42727 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T21:49:40,138 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T21:49:40,138 INFO [RS:1;bb3046a53f79:42727 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T21:49:40,139 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43808, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T21:49:40,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38967 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T21:49:40,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38967 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
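[Editor's note] The two TableDescriptorChecker warnings above reflect the deliberately small limits requested for the table the test is about to create: MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, far below the defaults, so that flushes and WAL rolls happen quickly during the run. As a hypothetical illustration only (this is not the actual test source, just a sketch using the public HBase client API and the values reported in the warnings), such a descriptor could be requested like this:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateSmallFlushTable {
  // Hypothetical sketch: build a table with one 'info' family and the tiny
  // size limits that trigger the TableDescriptorChecker warnings above.
  static void create(Admin admin) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432L)       // table-level "hbase.hregion.max.filesize"
        .setMemStoreFlushSize(8192L)   // table-level "hbase.hregion.memstore.flush.size"
        .build();
    admin.createTable(td);            // master warns but still creates the table
  }
}
```

The master logs the warnings but proceeds, storing pid=4 for the CreateTableProcedure in the entries that follow.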
2024-12-04T21:49:40,140 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38967 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:49:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38967 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-04T21:49:40,143 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T21:49:40,143 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:40,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38967 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-04T21:49:40,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38967 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:49:40,144 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T21:49:40,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40897 is added to blk_1073741835_1011 (size=393) 2024-12-04T21:49:40,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39293 is added to blk_1073741835_1011 (size=393) 2024-12-04T21:49:40,153 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e160a432b5502e91bcf2dd834ad52fa7, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a 2024-12-04T21:49:40,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40897 is added to blk_1073741836_1012 (size=76) 2024-12-04T21:49:40,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39293 is added to blk_1073741836_1012 (size=76) 2024-12-04T21:49:40,162 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:40,162 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing e160a432b5502e91bcf2dd834ad52fa7, disabling compactions & flushes 2024-12-04T21:49:40,162 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:49:40,162 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:49:40,162 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. after waiting 0 ms 2024-12-04T21:49:40,162 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:49:40,162 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:49:40,162 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for e160a432b5502e91bcf2dd834ad52fa7: Waiting for close lock at 1733348980162Disabling compacts and flushes for region at 1733348980162Disabling writes for close at 1733348980162Writing region close event to WAL at 1733348980162Closed at 1733348980162 2024-12-04T21:49:40,164 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T21:49:40,165 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733348980164"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733348980164"}]},"ts":"1733348980164"} 2024-12-04T21:49:40,168 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-04T21:49:40,170 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T21:49:40,170 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733348980170"}]},"ts":"1733348980170"} 2024-12-04T21:49:40,173 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-04T21:49:40,174 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e160a432b5502e91bcf2dd834ad52fa7, ASSIGN}] 2024-12-04T21:49:40,175 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e160a432b5502e91bcf2dd834ad52fa7, ASSIGN 2024-12-04T21:49:40,177 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e160a432b5502e91bcf2dd834ad52fa7, ASSIGN; state=OFFLINE, location=bb3046a53f79,39097,1733348978686; forceNewPlan=false, retain=false 2024-12-04T21:49:40,242 INFO [RS:1;bb3046a53f79:42727 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C42727%2C1733348980042, suffix=, logDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042, archiveDir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs, maxLogs=32 2024-12-04T21:49:40,244 INFO [RS:1;bb3046a53f79:42727 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C42727%2C1733348980042.1733348980243 2024-12-04T21:49:40,252 INFO [RS:1;bb3046a53f79:42727 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 2024-12-04T21:49:40,254 DEBUG [RS:1;bb3046a53f79:42727 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35275:35275),(127.0.0.1/127.0.0.1:41327:41327)] 2024-12-04T21:49:40,327 INFO [bb3046a53f79:38967 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
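[Editor's note] The AbstractFSWAL entry above reports the WAL rolling parameters the new region server ends up with: blocksize=256 MB, rollsize=128 MB, maxLogs=32. These are derived from standard HBase configuration keys (rollsize is blocksize times the roll multiplier, so 0.5 yields the 128 MB shown). A hedged sketch of how a test harness might set them, assuming the stock key names rather than anything taken from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfig {
  // Hypothetical sketch of the configuration behind the WAL line above.
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size: 256 MB ("blocksize=256 MB").
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll when the WAL reaches blocksize * multiplier; 0.5 gives 128 MB.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Keep at most 32 WAL files before forcing flushes ("maxLogs=32").
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}
```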
2024-12-04T21:49:40,328 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e160a432b5502e91bcf2dd834ad52fa7, regionState=OPENING, regionLocation=bb3046a53f79,39097,1733348978686 2024-12-04T21:49:40,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e160a432b5502e91bcf2dd834ad52fa7, ASSIGN because future has completed 2024-12-04T21:49:40,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e160a432b5502e91bcf2dd834ad52fa7, server=bb3046a53f79,39097,1733348978686}] 2024-12-04T21:49:40,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:40,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:40,493 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:49:40,493 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e160a432b5502e91bcf2dd834ad52fa7, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:49:40,494 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,494 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:49:40,494 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,494 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,496 INFO [StoreOpener-e160a432b5502e91bcf2dd834ad52fa7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,497 INFO [StoreOpener-e160a432b5502e91bcf2dd834ad52fa7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e160a432b5502e91bcf2dd834ad52fa7 columnFamilyName info 2024-12-04T21:49:40,497 DEBUG [StoreOpener-e160a432b5502e91bcf2dd834ad52fa7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:49:40,498 INFO [StoreOpener-e160a432b5502e91bcf2dd834ad52fa7-1 {}] regionserver.HStore(327): Store=e160a432b5502e91bcf2dd834ad52fa7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:49:40,498 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,499 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,499 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,500 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,500 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,502 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,504 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:49:40,505 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e160a432b5502e91bcf2dd834ad52fa7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813322, jitterRate=0.034193143248558044}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T21:49:40,505 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:40,505 DEBUG 
[RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e160a432b5502e91bcf2dd834ad52fa7: Running coprocessor pre-open hook at 1733348980494Writing region info on filesystem at 1733348980494Initializing all the Stores at 1733348980495 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733348980495Cleaning up temporary data from old regions at 1733348980500 (+5 ms)Running coprocessor post-open hooks at 1733348980505 (+5 ms)Region opened successfully at 1733348980505 2024-12-04T21:49:40,506 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7., pid=6, masterSystemTime=1733348980484 2024-12-04T21:49:40,509 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:49:40,509 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:49:40,510 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e160a432b5502e91bcf2dd834ad52fa7, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,39097,1733348978686 2024-12-04T21:49:40,511 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38967 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=bb3046a53f79,39097,1733348978686, table=TestLogRolling-testLogRollOnDatanodeDeath, region=e160a432b5502e91bcf2dd834ad52fa7. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-04T21:49:40,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e160a432b5502e91bcf2dd834ad52fa7, server=bb3046a53f79,39097,1733348978686 because future has completed 2024-12-04T21:49:40,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T21:49:40,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e160a432b5502e91bcf2dd834ad52fa7, server=bb3046a53f79,39097,1733348978686 in 183 msec 2024-12-04T21:49:40,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T21:49:40,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e160a432b5502e91bcf2dd834ad52fa7, ASSIGN in 342 msec 2024-12-04T21:49:40,520 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T21:49:40,520 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733348980520"}]},"ts":"1733348980520"} 2024-12-04T21:49:40,522 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-04T21:49:40,523 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T21:49:40,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 383 msec 2024-12-04T21:49:40,861 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T21:49:40,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:40,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:40,878 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:40,878 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:44,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-04T21:49:44,832 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-04T21:49:44,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-04T21:49:45,333 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-04T21:49:45,996 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T21:49:45,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:46,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:46,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:46,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:49:50,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38967 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:49:50,193 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-04T21:49:50,193 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-04T21:49:50,199 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-04T21:49:50,199 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:49:50,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:50,217 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:49:50,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:49:50,218 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:49:50,218 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:49:50,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36976750{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:49:50,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35fa8dab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:49:50,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@da15752{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/java.io.tmpdir/jetty-localhost-40571-hadoop-hdfs-3_4_1-tests_jar-_-any-15416263457689243840/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:50,310 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@594edb9c{HTTP/1.1, (http/1.1)}{localhost:40571} 2024-12-04T21:49:50,310 INFO [Time-limited test {}] server.Server(415): Started @111240ms 2024-12-04T21:49:50,311 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:49:50,340 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:50,343 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:49:50,343 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:49:50,344 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:49:50,344 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:49:50,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d12a96d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:49:50,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f2709e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:49:50,369 WARN [Thread-821 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data5/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:50,369 WARN [Thread-822 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data6/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:50,383 WARN [Thread-801 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:49:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfaf4d17b394a870d with lease ID 0x24418da8b59e6ae4: Processing first storage report for DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e from datanode DatanodeRegistration(127.0.0.1:39969, datanodeUuid=469be2f3-17dc-4773-94e3-efa92295e91f, infoPort=45943, infoSecurePort=0, ipcPort=39739, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfaf4d17b394a870d with lease ID 0x24418da8b59e6ae4: from storage DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e node DatanodeRegistration(127.0.0.1:39969, datanodeUuid=469be2f3-17dc-4773-94e3-efa92295e91f, infoPort=45943, infoSecurePort=0, ipcPort=39739, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T21:49:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfaf4d17b394a870d with lease ID 0x24418da8b59e6ae4: Processing first storage report for DS-1f38b781-6dbf-4215-bc97-9518d083e241 from datanode DatanodeRegistration(127.0.0.1:39969, datanodeUuid=469be2f3-17dc-4773-94e3-efa92295e91f, infoPort=45943, infoSecurePort=0, ipcPort=39739, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfaf4d17b394a870d with lease ID 0x24418da8b59e6ae4: from storage DS-1f38b781-6dbf-4215-bc97-9518d083e241 node DatanodeRegistration(127.0.0.1:39969, datanodeUuid=469be2f3-17dc-4773-94e3-efa92295e91f, infoPort=45943, infoSecurePort=0, ipcPort=39739, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:50,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ad2d2c6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/java.io.tmpdir/jetty-localhost-34929-hadoop-hdfs-3_4_1-tests_jar-_-any-4923526791045933001/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:50,437 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1115d54b{HTTP/1.1, (http/1.1)}{localhost:34929} 2024-12-04T21:49:50,437 INFO [Time-limited test {}] server.Server(415): Started @111367ms 2024-12-04T21:49:50,439 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:49:50,468 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:49:50,472 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:49:50,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:49:50,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:49:50,473 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:49:50,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47e3d6b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:49:50,475 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cb49758{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:49:50,500 WARN [Thread-856 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data7/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:50,500 WARN [Thread-857 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data8/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:50,517 WARN [Thread-836 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:49:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1a81c501d395589b with lease ID 0x24418da8b59e6ae5: Processing first storage report for DS-ee248095-2476-4cef-a4ff-b9ef6a921734 from datanode DatanodeRegistration(127.0.0.1:34351, datanodeUuid=55d959ea-c852-4956-a61a-937210e6c930, infoPort=44203, infoSecurePort=0, ipcPort=43029, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1a81c501d395589b with lease ID 0x24418da8b59e6ae5: from storage DS-ee248095-2476-4cef-a4ff-b9ef6a921734 node DatanodeRegistration(127.0.0.1:34351, datanodeUuid=55d959ea-c852-4956-a61a-937210e6c930, infoPort=44203, infoSecurePort=0, ipcPort=43029, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1a81c501d395589b with lease ID 0x24418da8b59e6ae5: Processing first storage report for DS-bf2e9a18-0680-48a8-872c-478ac5868c5a from datanode DatanodeRegistration(127.0.0.1:34351, datanodeUuid=55d959ea-c852-4956-a61a-937210e6c930, infoPort=44203, infoSecurePort=0, ipcPort=43029, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1a81c501d395589b with lease ID 0x24418da8b59e6ae5: from storage DS-bf2e9a18-0680-48a8-872c-478ac5868c5a node DatanodeRegistration(127.0.0.1:34351, datanodeUuid=55d959ea-c852-4956-a61a-937210e6c930, infoPort=44203, infoSecurePort=0, ipcPort=43029, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:50,568 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52bf7bc9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/java.io.tmpdir/jetty-localhost-45909-hadoop-hdfs-3_4_1-tests_jar-_-any-11001769150025991782/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:50,568 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@309070cf{HTTP/1.1, (http/1.1)}{localhost:45909} 2024-12-04T21:49:50,568 INFO [Time-limited test {}] server.Server(415): Started @111498ms 2024-12-04T21:49:50,569 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
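Editor's note: the repeated DirectoryScanner warnings above come from dfs.datanode.directoryscan.throttle.limit.ms.per.sec being set above 1000 ms/sec, so each datanode logs the WARN and assumes the default of -1. A minimal sketch, assuming hadoop-common on the classpath, of keeping the value in the accepted range when building a test Configuration; the value 500 is purely illustrative:

```java
import org.apache.hadoop.conf.Configuration;

public class DirectoryScannerThrottleConfig {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Values above 1000 ms/sec trigger the WARN seen in the log above and the
        // scanner falls back to the default of -1; keep the setting at or below 1000.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
        System.out.println(
            conf.getInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", -1));
    }
}
```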
2024-12-04T21:49:50,625 WARN [Thread-883 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:50,625 WARN [Thread-882 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9/current/BP-1821304615-172.17.0.2-1733348978047/current, will proceed with Du for space computation calculation, 2024-12-04T21:49:50,641 WARN [Thread-871 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:49:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x30cd9c0c2dfa152d with lease ID 0x24418da8b59e6ae6: Processing first storage report for DS-8c8213f7-0d4c-4047-a947-b058f90904b9 from datanode DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30cd9c0c2dfa152d with lease ID 0x24418da8b59e6ae6: from storage DS-8c8213f7-0d4c-4047-a947-b058f90904b9 node DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:50,644 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x30cd9c0c2dfa152d with lease ID 0x24418da8b59e6ae6: Processing first storage report for DS-6e446c05-aaff-4ceb-8cae-b33fc442c955 from datanode DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047) 2024-12-04T21:49:50,644 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30cd9c0c2dfa152d with lease ID 0x24418da8b59e6ae6: from storage DS-6e446c05-aaff-4ceb-8cae-b33fc442c955 node DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:49:50,687 WARN [ResponseProcessor for block BP-1821304615-172.17.0.2-1733348978047:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1821304615-172.17.0.2-1733348978047:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,687 WARN [ResponseProcessor for block BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,688 WARN [ResponseProcessor for block BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,689 WARN [DataStreamer for file /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 block BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:50,689 WARN [DataStreamer for file /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta block BP-1821304615-172.17.0.2-1733348978047:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK], DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:50,689 WARN [DataStreamer for file /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 block BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 
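Editor's note: at this point the client-side DataStreamer has marked datanode 127.0.0.1:40897 as bad and is recovering each write pipeline with the surviving node. How aggressively the DFS client tries to replace a failed datanode in an open pipeline is controlled by the dfs.client.block.write.replace-datanode-on-failure.* settings; a hedged sketch of tuning them on a client Configuration (the values shown are illustrative, not what this test uses):

```java
import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryPolicyExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Allow the client to ask for a replacement datanode when one in the pipeline fails...
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // ...using the DEFAULT policy (replacement is only attempted in certain cases,
        // e.g. sufficiently replicated pipelines), and continue with the surviving
        // datanodes rather than failing the write if no replacement can be found.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        System.out.println(
            conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
    }
}
```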
2024-12-04T21:49:50,689 WARN [ResponseProcessor for block BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,689 WARN [PacketResponder: BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40897] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:50,690 WARN [DataStreamer for file /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 block BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:50,689 WARN [PacketResponder: BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40897] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:50,690 WARN [PacketResponder: BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40897] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:50,691 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-476235279_22 at /127.0.0.1:51642 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39293:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51642 dst: /127.0.0.1:39293 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:50,691 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:51672 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39293:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51672 dst: /127.0.0.1:39293 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:50,691 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:33692 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33692 dst: /127.0.0.1:40897 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:50,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:51656 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39293:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51656 dst: /127.0.0.1:39293 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:50,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_977013566_22 at /127.0.0.1:33726 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33726 dst: /127.0.0.1:40897 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:50,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-476235279_22 at /127.0.0.1:33662 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33662 dst: /127.0.0.1:40897 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:50,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:33684 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33684 dst: /127.0.0.1:40897 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:50,693 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_977013566_22 at /127.0.0.1:51698 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39293:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51698 dst: /127.0.0.1:39293 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:50,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21fae364{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:50,695 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1cbabe3e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:50,695 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:50,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46fa755{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:50,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27772cb6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:50,697 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T21:49:50,697 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:49:50,697 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:49:50,697 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1821304615-172.17.0.2-1733348978047 (Datanode Uuid d1e53198-d7bc-4599-888e-c6fe62466d4c) service to localhost/127.0.0.1:38693 2024-12-04T21:49:50,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data3/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:50,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data4/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:50,698 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:49:50,700 WARN [DataStreamer for file /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 block BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,701 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:45458 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39293:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45458 dst: /127.0.0.1:39293 java.io.IOException: The stream is closed at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:914) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:50,701 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_977013566_22 at /127.0.0.1:45426 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39293:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45426 dst: /127.0.0.1:39293 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
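Editor's note: the shutdown messages above (Jetty contexts stopped, the command processor interrupted, "Ending block pool service") are what a datanode emits when the test tears it down, consistent with a log-rolling test killing datanodes mid-write. A minimal sketch, assuming the hadoop-hdfs tests artifact on the classpath, of stopping one datanode in a MiniDFSCluster the way such a test typically does:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class StopDataNodeSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)
            .build();
        cluster.waitActive();
        try {
            // Stopping a datanode while clients hold open write pipelines is what
            // produces the pipeline errors and block pool shutdown lines seen above.
            cluster.stopDataNode(0);
        } finally {
            cluster.shutdown();
        }
    }
}
```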
2024-12-04T21:49:50,701 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-476235279_22 at /127.0.0.1:45422 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39293:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45422 dst: /127.0.0.1:39293 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:50,702 WARN [ResponseProcessor for block BP-1821304615-172.17.0.2-1733348978047:blk_1073741834_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1821304615-172.17.0.2-1733348978047:blk_1073741834_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,702 WARN [ResponseProcessor for block BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1821304615-172.17.0.2-1733348978047:blk_1073741837_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,702 WARN [ResponseProcessor for block BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1821304615-172.17.0.2-1733348978047:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:45428 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39293:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45428 dst: /127.0.0.1:39293 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:50,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4317eac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:50,703 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@499897b6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:50,703 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:50,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f7c849b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:50,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@430ff8b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:50,705 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:49:50,705 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T21:49:50,705 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:49:50,705 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1821304615-172.17.0.2-1733348978047 (Datanode Uuid 4ea1e97a-75b8-4db3-a557-fb6227b680da) service to localhost/127.0.0.1:38693 2024-12-04T21:49:50,706 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data1/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:50,706 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data2/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:50,706 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:49:50,709 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7., hostname=bb3046a53f79,39097,1733348978686, seqNum=2] 2024-12-04T21:49:50,710 ERROR [FSHLog-0-hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a-prefix:bb3046a53f79,39097,1733348978686 {}] wal.AbstractFSWAL(1838): appendAndSync throws 
IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,710 WARN [FSHLog-0-hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a-prefix:bb3046a53f79,39097,1733348978686 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,710 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:49:50,711 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C39097%2C1733348978686:(num 1733348979474) roll requested 2024-12-04T21:49:50,711 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C39097%2C1733348978686.1733348990711 2024-12-04T21:49:50,716 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:50,716 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:50,716 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:50,716 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:50,717 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:50,717 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348990711 2024-12-04T21:49:50,717 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:50,717 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
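Editor's note: the roll above is initiated internally by the region server's log roller after the append failure. For comparison, the same kind of roll can be requested explicitly through the HBase client Admin API; a hedged sketch, reusing the server name that appears in this log and assuming a reachable cluster:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Server name format is host,port,startcode; this one is taken from the log.
            ServerName serverName = ServerName.valueOf("bb3046a53f79,39097,1733348978686");
            admin.rollWALWriter(serverName);
        }
    }
}
```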
2024-12-04T21:49:50,718 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-04T21:49:50,718 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-04T21:49:50,718 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 2024-12-04T21:49:50,719 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45943:45943),(127.0.0.1/127.0.0.1:44203:44203)] 2024-12-04T21:49:50,719 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 is not closed yet, will try archiving it next time 2024-12-04T21:49:50,721 WARN [IPC Server handler 3 on default port 38693 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-04T21:49:50,725 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 after 5ms 2024-12-04T21:49:50,765 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:52,112 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:49:52,720 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:52,721 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348990711 2024-12-04T21:49:52,721 WARN [ResponseProcessor for block BP-1821304615-172.17.0.2-1733348978047:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1821304615-172.17.0.2-1733348978047:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:52,722 WARN [DataStreamer for file /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348990711 block BP-1821304615-172.17.0.2-1733348978047:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:49:52,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:55172 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39969:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55172 dst: /127.0.0.1:39969 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:52,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:59448 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34351:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59448 dst: /127.0.0.1:34351 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:52,726 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@da15752{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:52,727 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@594edb9c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:52,727 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:52,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35fa8dab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:52,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36976750{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:52,730 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:49:52,730 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T21:49:52,730 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1821304615-172.17.0.2-1733348978047 (Datanode Uuid 469be2f3-17dc-4773-94e3-efa92295e91f) service to localhost/127.0.0.1:38693 2024-12-04T21:49:52,730 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:49:52,730 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data5/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:52,730 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data6/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:52,731 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:49:52,765 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:54,112 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:54,720 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:54,721 WARN [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]] 2024-12-04T21:49:54,721 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C39097%2C1733348978686:(num 1733348990711) roll requested 2024-12-04T21:49:54,722 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C39097%2C1733348978686.1733348994722 2024-12-04T21:49:54,727 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 after 4009ms 2024-12-04T21:49:54,731 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39969 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:54,731 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:59468 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data8]'}, localName='127.0.0.1:34351', datanodeUuid='55d959ea-c852-4956-a61a-937210e6c930', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741839_1021 to mirror 127.0.0.1:39969 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:54,731 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:49:54,732 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741839_1021 2024-12-04T21:49:54,732 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:59468 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-04T21:49:54,732 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:59468 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34351:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59468 dst: /127.0.0.1:34351 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:54,734 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:49:54,735 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T21:49:54,738 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:54,738 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:49:54,738 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741840_1022 2024-12-04T21:49:54,738 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:49:54,740 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40897 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:54,740 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39592 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741841_1023 to mirror 127.0.0.1:40897 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:54,741 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:54,741 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741841_1023 2024-12-04T21:49:54,741 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39592 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-04T21:49:54,741 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39592 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39592 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:54,741 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] 2024-12-04T21:49:54,746 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:54,746 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:54,746 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:54,746 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:54,746 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:54,746 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348990711 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348994722 2024-12-04T21:49:54,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34351 is added to blk_1073741838_1020 (size=3600) 2024-12-04T21:49:54,749 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44203:44203),(127.0.0.1/127.0.0.1:46571:46571)] 2024-12-04T21:49:54,749 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 is not closed yet, will try archiving it next time 2024-12-04T21:49:54,749 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348990711 is not closed yet, will try archiving it next time 2024-12-04T21:49:54,766 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:55,151 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 is not closed yet, will try archiving it next time 2024-12-04T21:49:56,113 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:56,536 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@46cf2638[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34351, datanodeUuid=55d959ea-c852-4956-a61a-937210e6c930, infoPort=44203, infoSecurePort=0, ipcPort=43029, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047):Failed to transfer BP-1821304615-172.17.0.2-1733348978047:blk_1073741838_1020 to 127.0.0.1:39969 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:56,740 WARN [ResponseProcessor for block BP-1821304615-172.17.0.2-1733348978047:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1821304615-172.17.0.2-1733348978047:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:49:56,741 WARN [DataStreamer for file /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348994722 block BP-1821304615-172.17.0.2-1733348978047:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:49:56,741 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:59470 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34351:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59470 dst: /127.0.0.1:34351 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:56,742 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39598 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39598 dst: /127.0.0.1:34003 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:56,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ad2d2c6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:49:56,745 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1115d54b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:49:56,745 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:49:56,746 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f2709e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:49:56,746 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d12a96d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,STOPPED} 2024-12-04T21:49:56,747 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:49:56,747 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:49:56,748 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:49:56,748 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1821304615-172.17.0.2-1733348978047 (Datanode Uuid 55d959ea-c852-4956-a61a-937210e6c930) service to localhost/127.0.0.1:38693 2024-12-04T21:49:56,748 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data7/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:56,748 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data8/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:49:56,749 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:49:56,750 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:56,750 WARN [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]] 2024-12-04T21:49:56,750 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C39097%2C1733348978686:(num 1733348994722) roll requested 2024-12-04T21:49:56,751 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C39097%2C1733348978686.1733348996750 2024-12-04T21:49:56,753 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:56,753 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:56,753 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741843_1026 2024-12-04T21:49:56,754 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] 2024-12-04T21:49:56,756 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39969 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:56,756 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39614 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741844_1027 to mirror 127.0.0.1:39969 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:56,756 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:49:56,756 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741844_1027 2024-12-04T21:49:56,756 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39614 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-04T21:49:56,757 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39614 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39614 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:56,757 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:49:56,758 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:56,758 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:49:56,758 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741845_1028 2024-12-04T21:49:56,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:56,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e160a432b5502e91bcf2dd834ad52fa7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T21:49:56,759 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:49:56,761 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:56,761 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 
2024-12-04T21:49:56,761 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741846_1029 2024-12-04T21:49:56,762 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:49:56,763 WARN [IPC Server handler 3 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T21:49:56,763 WARN [IPC Server handler 3 on default port 38693 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T21:49:56,763 WARN [IPC Server handler 3 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T21:49:56,766 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:49:56,770 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:56,770 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:56,770 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:56,770 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:56,770 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:56,770 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348994722 with entries=9, filesize=9.53 KB; new WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348996750 2024-12-04T21:49:56,771 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46571:46571)] 2024-12-04T21:49:56,771 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 is not closed yet, will try archiving it next time 2024-12-04T21:49:56,771 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348994722 is not closed yet, will try archiving it next time 2024-12-04T21:49:56,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741842_1025 (size=9768) 2024-12-04T21:49:56,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/a46e85636e0542efbb4855dedf083318 is 1080, key is row0002/info:/1733348992732/Put/seqid=0 2024-12-04T21:49:56,781 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:49:56,781 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:49:56,781 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741848_1031 2024-12-04T21:49:56,782 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:49:56,784 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39293 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:56,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39630 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741849_1032 to mirror 127.0.0.1:39293 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:56,784 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:49:56,784 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741849_1032 2024-12-04T21:49:56,784 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39630 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T21:49:56,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39630 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39630 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:56,784 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:49:56,786 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40897 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:49:56,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39636 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741850_1033 to mirror 127.0.0.1:40897 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:56,787 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:56,787 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741850_1033 2024-12-04T21:49:56,787 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39636 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T21:49:56,787 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39636 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39636 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:56,787 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] 2024-12-04T21:49:56,789 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34351 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:56,789 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39652 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741851_1034 to mirror 127.0.0.1:34351 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:56,790 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:49:56,790 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741851_1034 2024-12-04T21:49:56,790 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39652 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T21:49:56,790 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39652 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39652 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:56,790 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:49:56,791 WARN [IPC Server handler 0 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T21:49:56,791 WARN [IPC Server handler 0 on default port 38693 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T21:49:56,791 WARN [IPC Server handler 0 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T21:49:56,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741852_1035 (size=10347) 2024-12-04T21:49:57,173 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 is not closed yet, will try archiving it next time 2024-12-04T21:49:57,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/a46e85636e0542efbb4855dedf083318 2024-12-04T21:49:57,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/a46e85636e0542efbb4855dedf083318 as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/a46e85636e0542efbb4855dedf083318 2024-12-04T21:49:57,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/a46e85636e0542efbb4855dedf083318, entries=5, sequenceid=11, filesize=10.1 K 2024-12-04T21:49:57,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for e160a432b5502e91bcf2dd834ad52fa7 in 
459ms, sequenceid=11, compaction requested=false 2024-12-04T21:49:57,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e160a432b5502e91bcf2dd834ad52fa7: 2024-12-04T21:49:57,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:57,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e160a432b5502e91bcf2dd834ad52fa7 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-04T21:49:57,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/ac9f777e6f274644a856a9ded49b301e is 1080, key is row0007/info:/1733348996760/Put/seqid=0 2024-12-04T21:49:57,393 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:57,393 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:49:57,393 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741853_1036 2024-12-04T21:49:57,394 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:49:57,395 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:57,395 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:49:57,396 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741854_1037 2024-12-04T21:49:57,396 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:49:57,397 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:57,397 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:49:57,397 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741855_1038 2024-12-04T21:49:57,398 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:49:57,399 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:57,399 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:57,399 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741856_1039 2024-12-04T21:49:57,400 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] 2024-12-04T21:49:57,400 WARN [IPC Server handler 2 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T21:49:57,401 WARN [IPC Server handler 2 on default port 38693 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T21:49:57,401 WARN [IPC Server handler 2 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T21:49:57,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741857_1040 (size=12506) 2024-12-04T21:49:57,805 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/ac9f777e6f274644a856a9ded49b301e 2024-12-04T21:49:57,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/ac9f777e6f274644a856a9ded49b301e as 
hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/ac9f777e6f274644a856a9ded49b301e 2024-12-04T21:49:57,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/ac9f777e6f274644a856a9ded49b301e, entries=7, sequenceid=24, filesize=12.2 K 2024-12-04T21:49:57,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for e160a432b5502e91bcf2dd834ad52fa7 in 439ms, sequenceid=24, compaction requested=false 2024-12-04T21:49:57,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e160a432b5502e91bcf2dd834ad52fa7: 2024-12-04T21:49:57,825 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-04T21:49:57,825 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:57,825 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/ac9f777e6f274644a856a9ded49b301e because midkey is the same as first or last row 2024-12-04T21:49:58,113 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,766 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,772 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,772 WARN [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]] 2024-12-04T21:49:58,772 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C39097%2C1733348978686:(num 1733348996750) roll requested 2024-12-04T21:49:58,772 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C39097%2C1733348978686.1733348998772 2024-12-04T21:49:58,774 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,775 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:49:58,775 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741858_1041 2024-12-04T21:49:58,775 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:49:58,776 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,776 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:49:58,776 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741859_1042 2024-12-04T21:49:58,777 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:49:58,778 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,778 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK], DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:58,778 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741860_1043 2024-12-04T21:49:58,778 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] 2024-12-04T21:49:58,780 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39293 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,780 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39696 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741861_1044 to mirror 127.0.0.1:39293 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:58,780 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:49:58,780 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741861_1044 2024-12-04T21:49:58,780 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39696 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-04T21:49:58,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39696 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39696 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:58,781 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:49:58,782 WARN [IPC Server handler 4 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T21:49:58,782 WARN [IPC Server handler 4 on default port 38693 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T21:49:58,782 WARN [IPC Server handler 4 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T21:49:58,784 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:58,784 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:58,785 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:58,785 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:58,785 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:49:58,785 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL 
/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348996750 with entries=15, filesize=14.79 KB; new WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348998772 2024-12-04T21:49:58,786 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46571:46571)] 2024-12-04T21:49:58,786 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 is not closed yet, will try archiving it next time 2024-12-04T21:49:58,786 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348996750 is not closed yet, will try archiving it next time 2024-12-04T21:49:58,786 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348990711 to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs/bb3046a53f79%2C39097%2C1733348978686.1733348990711 2024-12-04T21:49:58,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741847_1030 (size=15148) 2024-12-04T21:49:58,787 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348994722 to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs/bb3046a53f79%2C39097%2C1733348978686.1733348994722 2024-12-04T21:49:58,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:49:58,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e160a432b5502e91bcf2dd834ad52fa7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-04T21:49:58,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/871e420e2b8d478ea060e2179c5cb48d is 1079, key is tmprow/info:/1733348998811/Put/seqid=0 2024-12-04T21:49:58,818 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,818 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:58,818 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741863_1046 2024-12-04T21:49:58,818 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] 2024-12-04T21:49:58,819 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,820 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:49:58,820 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741864_1047 2024-12-04T21:49:58,820 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:49:58,821 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,821 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:49:58,821 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741865_1048 2024-12-04T21:49:58,821 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:49:58,823 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39969 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:58,823 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39714 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741866_1049 to mirror 127.0.0.1:39969 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:58,823 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:49:58,823 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741866_1049 2024-12-04T21:49:58,823 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39714 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T21:49:58,823 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:39714 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39714 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:58,824 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:49:58,824 WARN [IPC Server handler 3 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T21:49:58,824 WARN [IPC Server handler 3 on default port 38693 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T21:49:58,824 WARN [IPC Server handler 3 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T21:49:58,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741867_1050 (size=6027) 2024-12-04T21:49:59,189 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 is not closed yet, will try archiving it next time 2024-12-04T21:49:59,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/871e420e2b8d478ea060e2179c5cb48d 2024-12-04T21:49:59,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/871e420e2b8d478ea060e2179c5cb48d as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/871e420e2b8d478ea060e2179c5cb48d 2024-12-04T21:49:59,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/871e420e2b8d478ea060e2179c5cb48d, entries=1, sequenceid=34, filesize=5.9 K 2024-12-04T21:49:59,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e160a432b5502e91bcf2dd834ad52fa7 in 432ms, 
sequenceid=34, compaction requested=true 2024-12-04T21:49:59,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e160a432b5502e91bcf2dd834ad52fa7: 2024-12-04T21:49:59,244 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-04T21:49:59,244 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:59,244 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/ac9f777e6f274644a856a9ded49b301e because midkey is the same as first or last row 2024-12-04T21:49:59,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e160a432b5502e91bcf2dd834ad52fa7:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T21:49:59,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:49:59,245 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:49:59,246 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T21:49:59,246 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.HStore(1541): e160a432b5502e91bcf2dd834ad52fa7/info is initiating minor compaction (all files) 2024-12-04T21:49:59,246 INFO [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e160a432b5502e91bcf2dd834ad52fa7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 
2024-12-04T21:49:59,247 INFO [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/a46e85636e0542efbb4855dedf083318, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/ac9f777e6f274644a856a9ded49b301e, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/871e420e2b8d478ea060e2179c5cb48d] into tmpdir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp, totalSize=28.2 K 2024-12-04T21:49:59,247 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting a46e85636e0542efbb4855dedf083318, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733348992732 2024-12-04T21:49:59,247 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting ac9f777e6f274644a856a9ded49b301e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733348996760 2024-12-04T21:49:59,248 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] compactions.Compactor(225): Compacting 871e420e2b8d478ea060e2179c5cb48d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733348998811 2024-12-04T21:49:59,263 INFO [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e160a432b5502e91bcf2dd834ad52fa7#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:49:59,263 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/3a0757bb15c34a8d80cab86fa06849e2 is 1080, key is row0002/info:/1733348992732/Put/seqid=0 2024-12-04T21:49:59,265 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:49:59,265 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:49:59,265 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741868_1051 2024-12-04T21:49:59,266 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:49:59,267 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:59,267 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:49:59,267 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741869_1052 2024-12-04T21:49:59,268 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] 2024-12-04T21:49:59,269 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:59,270 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:49:59,270 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741870_1053 2024-12-04T21:49:59,270 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:49:59,271 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:49:59,272 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 
2024-12-04T21:49:59,272 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741871_1054 2024-12-04T21:49:59,272 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:49:59,273 WARN [IPC Server handler 4 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T21:49:59,273 WARN [IPC Server handler 4 on default port 38693 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T21:49:59,273 WARN [IPC Server handler 4 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T21:49:59,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741872_1055 (size=17994) 2024-12-04T21:49:59,646 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@11ada236[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047):Failed to transfer BP-1821304615-172.17.0.2-1733348978047:blk_1073741842_1025 to 127.0.0.1:39293 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:49:59,647 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@159224db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047):Failed to transfer BP-1821304615-172.17.0.2-1733348978047:blk_1073741852_1035 to 127.0.0.1:39293 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:49:59,695 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/3a0757bb15c34a8d80cab86fa06849e2 as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2 2024-12-04T21:49:59,705 INFO [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e160a432b5502e91bcf2dd834ad52fa7/info of e160a432b5502e91bcf2dd834ad52fa7 into 3a0757bb15c34a8d80cab86fa06849e2(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T21:49:59,705 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e160a432b5502e91bcf2dd834ad52fa7: 2024-12-04T21:49:59,705 INFO [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7., storeName=e160a432b5502e91bcf2dd834ad52fa7/info, priority=13, startTime=1733348999245; duration=0sec 2024-12-04T21:49:59,705 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-04T21:49:59,705 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:59,705 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2 because midkey is the same as first or last row 2024-12-04T21:49:59,705 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-04T21:49:59,705 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:59,705 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2 because midkey is the same as first or last row 2024-12-04T21:49:59,705 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-04T21:49:59,705 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:49:59,706 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2 because midkey is the same as first or last row 2024-12-04T21:49:59,706 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:49:59,706 DEBUG [RS:0;bb3046a53f79:39097-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e160a432b5502e91bcf2dd834ad52fa7:info 2024-12-04T21:50:00,114 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:00,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:50:00,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e160a432b5502e91bcf2dd834ad52fa7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-04T21:50:00,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/0b85254df7e54ce28b42808bcf41f90a is 1079, key is tmprow/info:/1733349000240/Put/seqid=0 2024-12-04T21:50:00,253 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:00,253 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:50:00,253 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741873_1056 2024-12-04T21:50:00,254 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:50:00,255 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:00,255 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK], DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:50:00,255 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741874_1057 2024-12-04T21:50:00,256 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:50:00,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:40520 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741875_1058 to mirror 127.0.0.1:34351 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:00,258 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34351 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:00,258 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:40520 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T21:50:00,258 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:50:00,258 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741875_1058 2024-12-04T21:50:00,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:40520 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40520 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:00,259 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:50:00,260 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:00,260 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]) is bad. 2024-12-04T21:50:00,260 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741876_1059 2024-12-04T21:50:00,261 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40897,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK] 2024-12-04T21:50:00,262 WARN [IPC Server handler 2 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-04T21:50:00,262 WARN [IPC Server handler 2 on default port 38693 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-04T21:50:00,262 WARN [IPC Server handler 2 on default port 38693 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-04T21:50:00,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741877_1060 (size=6027) 2024-12-04T21:50:00,646 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@159224db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047):Failed to transfer BP-1821304615-172.17.0.2-1733348978047:blk_1073741857_1040 to 127.0.0.1:34351 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:00,646 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@11ada236[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047):Failed to transfer BP-1821304615-172.17.0.2-1733348978047:blk_1073741847_1030 to 127.0.0.1:40897 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:50:00,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/0b85254df7e54ce28b42808bcf41f90a 2024-12-04T21:50:00,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/0b85254df7e54ce28b42808bcf41f90a as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/0b85254df7e54ce28b42808bcf41f90a 2024-12-04T21:50:00,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/0b85254df7e54ce28b42808bcf41f90a, entries=1, sequenceid=45, filesize=5.9 K 2024-12-04T21:50:00,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e160a432b5502e91bcf2dd834ad52fa7 in 446ms, sequenceid=45, compaction requested=false 2024-12-04T21:50:00,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e160a432b5502e91bcf2dd834ad52fa7: 2024-12-04T21:50:00,688 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-04T21:50:00,688 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:50:00,688 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2 because midkey is the same as first or last row 2024-12-04T21:50:00,767 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:00,786 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:00,786 WARN [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-04T21:50:00,867 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:50:00,869 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:50:00,870 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:50:00,870 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:50:00,870 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:50:00,871 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66f331b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:50:00,872 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aae8f75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:50:00,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b3c44a7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/java.io.tmpdir/jetty-localhost-40991-hadoop-hdfs-3_4_1-tests_jar-_-any-14317908969539607488/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:00,972 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6682cb77{HTTP/1.1, (http/1.1)}{localhost:40991} 2024-12-04T21:50:00,972 INFO [Time-limited test {}] server.Server(415): Started @121902ms 2024-12-04T21:50:00,973 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:50:01,041 WARN [Thread-981 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:50:01,049 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x585f62213abaea9d with lease ID 0x24418da8b59e6ae7: from storage DS-355869a6-3520-426c-9c30-2d5062b9d3c1 node DatanodeRegistration(127.0.0.1:38171, datanodeUuid=d1e53198-d7bc-4599-888e-c6fe62466d4c, infoPort=37229, infoSecurePort=0, ipcPort=40887, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T21:50:01,049 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x585f62213abaea9d with lease ID 0x24418da8b59e6ae7: from storage DS-11239e35-2846-4a20-a4e6-f3085aa93be7 node DatanodeRegistration(127.0.0.1:38171, datanodeUuid=d1e53198-d7bc-4599-888e-c6fe62466d4c, infoPort=37229, infoSecurePort=0, ipcPort=40887, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:02,115 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:02,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@11ada236[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047):Failed to transfer BP-1821304615-172.17.0.2-1733348978047:blk_1073741867_1050 to 127.0.0.1:34351 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:50:02,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741872_1055 (size=17994) 2024-12-04T21:50:02,767 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:02,787 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:03,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741877_1060 (size=6027) 2024-12-04T21:50:04,115 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:04,767 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:04,787 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:06,116 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:06,768 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:06,787 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:08,116 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:08,620 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T21:50:08,769 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:08,788 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:09,270 ERROR [FSHLog-0-hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData-prefix:bb3046a53f79,38967,1733348978643 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:09,270 WARN [FSHLog-0-hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData-prefix:bb3046a53f79,38967,1733348978643 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:09,271 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C38967%2C1733348978643:(num 1733348979193) roll requested 2024-12-04T21:50:09,272 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C38967%2C1733348978643.1733349009271 2024-12-04T21:50:09,276 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:09,277 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 
2024-12-04T21:50:09,277 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741878_1061 2024-12-04T21:50:09,278 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:50:09,281 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39969 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:09,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-476235279_22 at /127.0.0.1:55576 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741879_1062 to mirror 127.0.0.1:39969 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:09,281 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 
2024-12-04T21:50:09,281 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741879_1062 2024-12-04T21:50:09,281 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-476235279_22 at /127.0.0.1:55576 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-04T21:50:09,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-476235279_22 at /127.0.0.1:55576 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55576 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:09,282 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:50:09,285 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-476235279_22 at /127.0.0.1:55592 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741880_1063 to mirror 127.0.0.1:34351 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:09,285 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34351 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:09,285 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-476235279_22 at /127.0.0.1:55592 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-04T21:50:09,285 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:50:09,285 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741880_1063 2024-12-04T21:50:09,285 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-476235279_22 at /127.0.0.1:55592 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55592 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:50:09,286 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:50:09,292 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:09,292 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:09,292 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:09,292 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:09,292 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:09,293 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733349009271 2024-12-04T21:50:09,293 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:09,294 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:09,294 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 2024-12-04T21:50:09,294 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46571:46571),(127.0.0.1/127.0.0.1:37229:37229)] 2024-12-04T21:50:09,294 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 is not closed yet, will try archiving it next time 2024-12-04T21:50:09,294 WARN [IPC Server handler 4 on default port 38693 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 has not been closed. Lease recovery is in progress. RecoveryId = 1065 for block blk_1073741830_1014 2024-12-04T21:50:09,295 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 after 1ms 2024-12-04T21:50:10,117 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:10,788 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:11,064 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6494e58d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1821304615-172.17.0.2-1733348978047:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:39293,null,null]) java.net.ConnectException: Call From bb3046a53f79/172.17.0.2 to localhost:46515 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-04T21:50:11,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741833_1019 (size=455) 2024-12-04T21:50:11,753 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348979474 to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs/bb3046a53f79%2C39097%2C1733348978686.1733348979474 2024-12-04T21:50:11,757 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348996750 to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs/bb3046a53f79%2C39097%2C1733348978686.1733348996750 2024-12-04T21:50:12,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741833_1019 (size=455) 2024-12-04T21:50:12,118 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:12,789 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:13,298 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 after 4003ms 2024-12-04T21:50:14,118 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:14,790 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,119 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,617 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C39097%2C1733348978686.1733349016616 2024-12-04T21:50:16,624 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,624 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:50:16,624 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741882_1066 2024-12-04T21:50:16,625 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:50:16,627 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,627 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 
2024-12-04T21:50:16,627 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741883_1067 2024-12-04T21:50:16,628 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:50:16,633 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,634 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,634 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,634 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,634 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,634 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348998772 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733349016616 2024-12-04T21:50:16,635 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46571:46571),(127.0.0.1/127.0.0.1:37229:37229)] 2024-12-04T21:50:16,635 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348998772 is not closed yet, will try archiving it next time 2024-12-04T21:50:16,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741862_1045 (size=13591) 2024-12-04T21:50:16,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39097 {}] regionserver.HRegion(8855): Flush requested on e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:50:16,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e160a432b5502e91bcf2dd834ad52fa7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-04T21:50:16,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/26673cf158ec4003991f9f1f4586c6a4 is 1080, key is row0013/info:/1733349016637/Put/seqid=0 2024-12-04T21:50:16,657 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,657 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:50:16,657 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741885_1069 2024-12-04T21:50:16,658 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:50:16,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741886_1070 (size=11421) 2024-12-04T21:50:16,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741886_1070 (size=11421) 2024-12-04T21:50:16,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/26673cf158ec4003991f9f1f4586c6a4 2024-12-04T21:50:16,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/26673cf158ec4003991f9f1f4586c6a4 as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/26673cf158ec4003991f9f1f4586c6a4 2024-12-04T21:50:16,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/26673cf158ec4003991f9f1f4586c6a4, entries=6, sequenceid=55, filesize=11.2 K 2024-12-04T21:50:16,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for e160a432b5502e91bcf2dd834ad52fa7 in 26ms, sequenceid=55, compaction requested=true 2024-12-04T21:50:16,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e160a432b5502e91bcf2dd834ad52fa7: 2024-12-04T21:50:16,675 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-04T21:50:16,675 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:50:16,675 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2 because midkey is the same as first or last row 
2024-12-04T21:50:16,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e160a432b5502e91bcf2dd834ad52fa7:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T21:50:16,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:50:16,676 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:50:16,677 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T21:50:16,677 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.HStore(1541): e160a432b5502e91bcf2dd834ad52fa7/info is initiating minor compaction (all files) 2024-12-04T21:50:16,677 INFO [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e160a432b5502e91bcf2dd834ad52fa7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:50:16,677 INFO [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/0b85254df7e54ce28b42808bcf41f90a, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/26673cf158ec4003991f9f1f4586c6a4] into tmpdir=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp, totalSize=34.6 K 2024-12-04T21:50:16,678 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] compactions.Compactor(225): Compacting 3a0757bb15c34a8d80cab86fa06849e2, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733348992732 2024-12-04T21:50:16,678 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] compactions.Compactor(225): Compacting 0b85254df7e54ce28b42808bcf41f90a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733349000240 2024-12-04T21:50:16,678 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] compactions.Compactor(225): Compacting 26673cf158ec4003991f9f1f4586c6a4, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733349000650 2024-12-04T21:50:16,695 INFO [RS:0;bb3046a53f79:39097-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e160a432b5502e91bcf2dd834ad52fa7#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:50:16,695 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/7bab5f254a094a3e8aadc940037557ba is 1080, key is row0002/info:/1733348992732/Put/seqid=0 2024-12-04T21:50:16,697 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,697 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741887_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:38171,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:50:16,697 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741887_1071 2024-12-04T21:50:16,698 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:50:16,700 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39969 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:16,700 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:55634 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741888_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10]'}, localName='127.0.0.1:34003', datanodeUuid='7223e6c5-6700-434a-ad9b-af10c996f444', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741888_1072 to mirror 127.0.0.1:39969 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:16,700 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:50:16,700 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741888_1072 2024-12-04T21:50:16,700 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:55634 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741888_1072] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T21:50:16,700 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:55634 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741888_1072] {}] datanode.DataXceiver(331): 127.0.0.1:34003:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55634 dst: /127.0.0.1:34003 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:16,700 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:50:16,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741889_1073 (size=23502) 2024-12-04T21:50:16,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741889_1073 (size=23502) 2024-12-04T21:50:16,712 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/7bab5f254a094a3e8aadc940037557ba as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/7bab5f254a094a3e8aadc940037557ba 2024-12-04T21:50:16,721 INFO [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e160a432b5502e91bcf2dd834ad52fa7/info of e160a432b5502e91bcf2dd834ad52fa7 into 7bab5f254a094a3e8aadc940037557ba(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T21:50:16,721 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e160a432b5502e91bcf2dd834ad52fa7: 2024-12-04T21:50:16,721 INFO [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7., storeName=e160a432b5502e91bcf2dd834ad52fa7/info, priority=13, startTime=1733349016675; duration=0sec 2024-12-04T21:50:16,721 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-04T21:50:16,721 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:50:16,721 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/7bab5f254a094a3e8aadc940037557ba because midkey is the same as first or last row 2024-12-04T21:50:16,722 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-04T21:50:16,722 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:50:16,722 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/7bab5f254a094a3e8aadc940037557ba because midkey is the same as first or last row 2024-12-04T21:50:16,722 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-04T21:50:16,722 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:50:16,722 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/7bab5f254a094a3e8aadc940037557ba because midkey is the same as first or last row 2024-12-04T21:50:16,722 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:50:16,722 DEBUG [RS:0;bb3046a53f79:39097-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e160a432b5502e91bcf2dd834ad52fa7:info 2024-12-04T21:50:16,790 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,791 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-04T21:50:16,864 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T21:50:16,864 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T21:50:16,865 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:50:16,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:16,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:16,865 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-04T21:50:16,865 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T21:50:16,865 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=698256917, stopped=false 2024-12-04T21:50:16,865 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bb3046a53f79,38967,1733348978643 2024-12-04T21:50:16,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:50:16,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:50:16,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42727-0x100a735cc2d0002, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:50:16,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:16,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42727-0x100a735cc2d0002, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:16,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:16,867 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:50:16,867 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T21:50:16,867 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:50:16,867 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:50:16,867 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:50:16,867 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:16,868 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bb3046a53f79,39097,1733348978686' ***** 2024-12-04T21:50:16,868 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T21:50:16,868 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42727-0x100a735cc2d0002, quorum=127.0.0.1:51562, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:50:16,868 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bb3046a53f79,42727,1733348980042' ***** 2024-12-04T21:50:16,868 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T21:50:16,868 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T21:50:16,868 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T21:50:16,868 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T21:50:16,868 INFO [RS:0;bb3046a53f79:39097 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T21:50:16,868 INFO [RS:1;bb3046a53f79:42727 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T21:50:16,868 INFO [RS:1;bb3046a53f79:42727 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T21:50:16,868 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(959): stopping server bb3046a53f79,42727,1733348980042 2024-12-04T21:50:16,868 INFO [RS:1;bb3046a53f79:42727 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:50:16,868 INFO [RS:1;bb3046a53f79:42727 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;bb3046a53f79:42727. 
2024-12-04T21:50:16,868 DEBUG [RS:1;bb3046a53f79:42727 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:50:16,868 DEBUG [RS:1;bb3046a53f79:42727 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:16,869 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(976): stopping server bb3046a53f79,42727,1733348980042; all regions closed. 2024-12-04T21:50:16,869 INFO [RS:0;bb3046a53f79:39097 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T21:50:16,869 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(3091): Received CLOSE for e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:50:16,869 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,869 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,869 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(959): stopping server bb3046a53f79,39097,1733348978686 2024-12-04T21:50:16,869 INFO [RS:0;bb3046a53f79:39097 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:50:16,869 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,869 INFO [RS:0;bb3046a53f79:39097 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bb3046a53f79:39097. 
2024-12-04T21:50:16,869 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,869 DEBUG [RS:0;bb3046a53f79:39097 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:50:16,869 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,869 DEBUG [RS:0;bb3046a53f79:39097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:16,869 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e160a432b5502e91bcf2dd834ad52fa7, disabling compactions & flushes 2024-12-04T21:50:16,869 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T21:50:16,869 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:50:16,869 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T21:50:16,869 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:50:16,870 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T21:50:16,870 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. after waiting 0 ms 2024-12-04T21:50:16,870 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 
2024-12-04T21:50:16,870 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T21:50:16,870 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing e160a432b5502e91bcf2dd834ad52fa7 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-12-04T21:50:16,871 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-04T21:50:16,871 DEBUG [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(1325): Online Regions={e160a432b5502e91bcf2dd834ad52fa7=TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7., 1588230740=hbase:meta,,1.1588230740} 2024-12-04T21:50:16,871 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,871 DEBUG [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e160a432b5502e91bcf2dd834ad52fa7 2024-12-04T21:50:16,871 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:50:16,871 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:50:16,871 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:16,871 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:50:16,871 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 2024-12-04T21:50:16,871 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:50:16,871 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:50:16,871 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-04T21:50:16,871 WARN [IPC Server handler 0 on default port 38693 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741837_1015 2024-12-04T21:50:16,871 ERROR [FSHLog-0-hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a-prefix:bb3046a53f79,39097,1733348978686.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,871 WARN [FSHLog-0-hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a-prefix:bb3046a53f79,39097,1733348978686.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:16,872 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C39097%2C1733348978686.meta:.meta(num 1733348979893) roll requested 2024-12-04T21:50:16,872 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 after 1ms 2024-12-04T21:50:16,872 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C39097%2C1733348978686.meta.1733349016872.meta 2024-12-04T21:50:16,874 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,874 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741890_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:50:16,874 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741890_1075 2024-12-04T21:50:16,875 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:50:16,875 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/596bb9248eb54b3e94f893fb2f10bb29 is 1080, key is row0018/info:/1733349016650/Put/seqid=0 2024-12-04T21:50:16,876 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,876 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741891_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:50:16,876 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741891_1076 2024-12-04T21:50:16,876 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:50:16,877 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,877 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741892_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:50:16,877 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741892_1077 2024-12-04T21:50:16,877 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T21:50:16,877 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:50:16,878 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,878 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:50:16,878 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741894_1079 2024-12-04T21:50:16,879 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:50:16,882 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,882 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,882 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,882 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,882 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:16,882 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733349016872.meta 2024-12-04T21:50:16,885 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,885 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,885 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta 2024-12-04T21:50:16,886 WARN [IPC Server handler 0 on default port 38693 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta has not been closed. Lease recovery is in progress. RecoveryId = 1081 for block blk_1073741834_1016 2024-12-04T21:50:16,886 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta after 1ms 2024-12-04T21:50:16,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741895_1080 (size=11421) 2024-12-04T21:50:16,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741895_1080 (size=11421) 2024-12-04T21:50:16,887 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/596bb9248eb54b3e94f893fb2f10bb29 2024-12-04T21:50:16,892 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46571:46571),(127.0.0.1/127.0.0.1:37229:37229)] 2024-12-04T21:50:16,892 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta is not closed yet, will try archiving it next time 2024-12-04T21:50:16,894 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/.tmp/info/596bb9248eb54b3e94f893fb2f10bb29 as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/596bb9248eb54b3e94f893fb2f10bb29 2024-12-04T21:50:16,900 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/596bb9248eb54b3e94f893fb2f10bb29, entries=6, sequenceid=65, filesize=11.2 K 2024-12-04T21:50:16,901 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for e160a432b5502e91bcf2dd834ad52fa7 in 31ms, sequenceid=65, compaction requested=false 2024-12-04T21:50:16,902 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/a46e85636e0542efbb4855dedf083318, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/ac9f777e6f274644a856a9ded49b301e, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/871e420e2b8d478ea060e2179c5cb48d, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/0b85254df7e54ce28b42808bcf41f90a, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/26673cf158ec4003991f9f1f4586c6a4] to archive 2024-12-04T21:50:16,903 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T21:50:16,905 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/a46e85636e0542efbb4855dedf083318 to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/a46e85636e0542efbb4855dedf083318 2024-12-04T21:50:16,907 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/ac9f777e6f274644a856a9ded49b301e to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/ac9f777e6f274644a856a9ded49b301e 2024-12-04T21:50:16,908 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2 to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/3a0757bb15c34a8d80cab86fa06849e2 2024-12-04T21:50:16,909 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/871e420e2b8d478ea060e2179c5cb48d to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/871e420e2b8d478ea060e2179c5cb48d 2024-12-04T21:50:16,911 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/0b85254df7e54ce28b42808bcf41f90a to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/0b85254df7e54ce28b42808bcf41f90a 2024-12-04T21:50:16,912 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/.tmp/info/b79a1e9fd87d44339b33cc11dafb9a87 is 203, key is 
TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7./info:regioninfo/1733348980510/Put/seqid=0 2024-12-04T21:50:16,913 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/26673cf158ec4003991f9f1f4586c6a4 to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/info/26673cf158ec4003991f9f1f4586c6a4 2024-12-04T21:50:16,913 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=bb3046a53f79:38967 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-04T21:50:16,913 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a46e85636e0542efbb4855dedf083318=10347, ac9f777e6f274644a856a9ded49b301e=12506, 3a0757bb15c34a8d80cab86fa06849e2=17994, 871e420e2b8d478ea060e2179c5cb48d=6027, 0b85254df7e54ce28b42808bcf41f90a=6027, 26673cf158ec4003991f9f1f4586c6a4=11421] 2024-12-04T21:50:16,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741896_1082 (size=7089) 2024-12-04T21:50:16,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741896_1082 (size=7089) 2024-12-04T21:50:16,919 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/.tmp/info/b79a1e9fd87d44339b33cc11dafb9a87 2024-12-04T21:50:16,920 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e160a432b5502e91bcf2dd834ad52fa7/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-12-04T21:50:16,920 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 
2024-12-04T21:50:16,920 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e160a432b5502e91bcf2dd834ad52fa7: Waiting for close lock at 1733349016869Running coprocessor pre-close hooks at 1733349016869Disabling compacts and flushes for region at 1733349016869Disabling writes for close at 1733349016870 (+1 ms)Obtaining lock to block concurrent updates at 1733349016870Preparing flush snapshotting stores in e160a432b5502e91bcf2dd834ad52fa7 at 1733349016870Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1733349016870Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. at 1733349016871 (+1 ms)Flushing e160a432b5502e91bcf2dd834ad52fa7/info: creating writer at 1733349016871Flushing e160a432b5502e91bcf2dd834ad52fa7/info: appending metadata at 1733349016874 (+3 ms)Flushing e160a432b5502e91bcf2dd834ad52fa7/info: closing flushed file at 1733349016874Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77c9d8a: reopening flushed file at 1733349016893 (+19 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for e160a432b5502e91bcf2dd834ad52fa7 in 31ms, sequenceid=65, compaction requested=false at 1733349016901 (+8 ms)Writing region close event to WAL at 1733349016914 (+13 ms)Running coprocessor post-close hooks at 1733349016920 (+6 ms)Closed at 1733349016920 2024-12-04T21:50:16,920 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733348980140.e160a432b5502e91bcf2dd834ad52fa7. 2024-12-04T21:50:16,937 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/.tmp/ns/7f4642c9c0144d22b377c65c82762827 is 43, key is default/ns:d/1733348979936/Put/seqid=0 2024-12-04T21:50:16,939 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:16,939 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:50:16,939 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741897_1083 2024-12-04T21:50:16,940 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:50:16,941 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,941 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 
2024-12-04T21:50:16,941 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741898_1084 2024-12-04T21:50:16,942 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:50:16,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741899_1085 (size=5153) 2024-12-04T21:50:16,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741899_1085 (size=5153) 2024-12-04T21:50:16,946 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/.tmp/ns/7f4642c9c0144d22b377c65c82762827 2024-12-04T21:50:16,966 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/.tmp/table/7e16b1923b1d44f3a41cb249e6de6329 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733348980520/Put/seqid=0 2024-12-04T21:50:16,967 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,968 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK], DatanodeInfoWithStorage[127.0.0.1:34003,DS-8c8213f7-0d4c-4047-a947-b058f90904b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK]) is bad. 2024-12-04T21:50:16,968 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741900_1086 2024-12-04T21:50:16,968 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39293,DS-d257fea5-30b5-4cb6-9919-c89aef35eb43,DISK] 2024-12-04T21:50:16,969 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,969 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK]) is bad. 2024-12-04T21:50:16,969 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741901_1087 2024-12-04T21:50:16,969 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34351,DS-ee248095-2476-4cef-a4ff-b9ef6a921734,DISK] 2024-12-04T21:50:16,971 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39969 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:16,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:56222 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741902_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data4]'}, localName='127.0.0.1:38171', datanodeUuid='d1e53198-d7bc-4599-888e-c6fe62466d4c', xmitsInProgress=0}:Exception transferring block BP-1821304615-172.17.0.2-1733348978047:blk_1073741902_1088 to mirror 127.0.0.1:39969 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:16,971 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1821304615-172.17.0.2-1733348978047:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38171,DS-355869a6-3520-426c-9c30-2d5062b9d3c1,DISK], DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK]) is bad. 2024-12-04T21:50:16,971 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-1821304615-172.17.0.2-1733348978047:blk_1073741902_1088 2024-12-04T21:50:16,971 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:56222 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741902_1088] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-04T21:50:16,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1160386470_22 at /127.0.0.1:56222 [Receiving block BP-1821304615-172.17.0.2-1733348978047:blk_1073741902_1088] {}] datanode.DataXceiver(331): 127.0.0.1:38171:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56222 dst: /127.0.0.1:38171 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:50:16,972 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39969,DS-9f5b1aeb-463d-49c0-a73e-7c9c23b7f86e,DISK] 2024-12-04T21:50:16,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741903_1089 (size=5424) 2024-12-04T21:50:16,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741903_1089 (size=5424) 2024-12-04T21:50:16,977 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/.tmp/table/7e16b1923b1d44f3a41cb249e6de6329 2024-12-04T21:50:16,983 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/.tmp/info/b79a1e9fd87d44339b33cc11dafb9a87 as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/info/b79a1e9fd87d44339b33cc11dafb9a87 2024-12-04T21:50:16,989 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/info/b79a1e9fd87d44339b33cc11dafb9a87, entries=10, sequenceid=11, filesize=6.9 K 2024-12-04T21:50:16,990 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/.tmp/ns/7f4642c9c0144d22b377c65c82762827 as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/ns/7f4642c9c0144d22b377c65c82762827 2024-12-04T21:50:16,996 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/ns/7f4642c9c0144d22b377c65c82762827, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T21:50:16,997 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/.tmp/table/7e16b1923b1d44f3a41cb249e6de6329 as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/table/7e16b1923b1d44f3a41cb249e6de6329 2024-12-04T21:50:17,002 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/table/7e16b1923b1d44f3a41cb249e6de6329, entries=2, sequenceid=11, filesize=5.3 K 2024-12-04T21:50:17,003 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, 
compaction requested=false 2024-12-04T21:50:17,007 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-04T21:50:17,008 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:50:17,008 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:50:17,008 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733349016871Running coprocessor pre-close hooks at 1733349016871Disabling compacts and flushes for region at 1733349016871Disabling writes for close at 1733349016871Obtaining lock to block concurrent updates at 1733349016871Preparing flush snapshotting stores in 1588230740 at 1733349016871Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733349016871Flushing stores of hbase:meta,,1.1588230740 at 1733349016892 (+21 ms)Flushing 1588230740/info: creating writer at 1733349016892Flushing 1588230740/info: appending metadata at 1733349016912 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733349016912Flushing 1588230740/ns: creating writer at 1733349016925 (+13 ms)Flushing 1588230740/ns: appending metadata at 1733349016937 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1733349016937Flushing 1588230740/table: creating writer at 1733349016952 (+15 ms)Flushing 1588230740/table: appending metadata at 1733349016966 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733349016966Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28a3a306: reopening flushed file at 1733349016982 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15ac5b1e: reopening flushed file at 1733349016989 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14516beb: reopening flushed file at 1733349016996 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false at 1733349017003 (+7 ms)Writing region close event to WAL at 1733349017004 (+1 ms)Running coprocessor post-close hooks at 1733349017008 (+4 ms)Closed at 1733349017008 2024-12-04T21:50:17,008 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T21:50:17,037 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.1733348998772 to hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs/bb3046a53f79%2C39097%2C1733348978686.1733348998772 2024-12-04T21:50:17,071 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(976): stopping server bb3046a53f79,39097,1733348978686; all regions closed. 
2024-12-04T21:50:17,072 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:17,072 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:17,072 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:17,073 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:17,073 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:17,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741893_1078 (size=825) 2024-12-04T21:50:17,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741893_1078 (size=825) 2024-12-04T21:50:17,111 INFO [regionserver/bb3046a53f79:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-04T21:50:17,112 INFO [regionserver/bb3046a53f79:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-04T21:50:17,339 INFO [regionserver/bb3046a53f79:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-04T21:50:17,339 INFO [regionserver/bb3046a53f79:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-04T21:50:17,341 INFO [regionserver/bb3046a53f79:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:50:17,652 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@159224db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34003, datanodeUuid=7223e6c5-6700-434a-ad9b-af10c996f444, infoPort=46571, infoSecurePort=0, ipcPort=46361, storageInfo=lv=-57;cid=testClusterID;nsid=260884353;c=1733348978047):Failed to transfer BP-1821304615-172.17.0.2-1733348978047:blk_1073741862_1045 to 127.0.0.1:39969 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:18,114 INFO [regionserver/bb3046a53f79:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:50:20,157 INFO [master/bb3046a53f79:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-04T21:50:20,157 INFO [master/bb3046a53f79:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-04T21:50:20,873 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 after 4002ms 2024-12-04T21:50:20,887 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta after 4002ms 2024-12-04T21:50:21,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741835_1011 (size=393) 2024-12-04T21:50:21,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:50:21,871 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-04T21:50:21,875 DEBUG [RS:1;bb3046a53f79:42727 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs 2024-12-04T21:50:21,875 INFO [RS:1;bb3046a53f79:42727 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C42727%2C1733348980042:(num 1733348980243) 2024-12-04T21:50:21,875 DEBUG [RS:1;bb3046a53f79:42727 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:21,875 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:50:21,876 INFO [RS:1;bb3046a53f79:42727 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:50:21,876 INFO [RS:1;bb3046a53f79:42727 {}] hbase.ChoreService(370): Chore service for: regionserver/bb3046a53f79:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T21:50:21,876 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T21:50:21,876 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T21:50:21,876 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T21:50:21,876 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-04T21:50:21,876 INFO [RS:1;bb3046a53f79:42727 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:50:21,877 INFO [RS:1;bb3046a53f79:42727 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42727 2024-12-04T21:50:21,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42727-0x100a735cc2d0002, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bb3046a53f79,42727,1733348980042 2024-12-04T21:50:21,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:50:21,879 INFO [RS:1;bb3046a53f79:42727 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:50:21,880 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bb3046a53f79,42727,1733348980042] 2024-12-04T21:50:21,880 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bb3046a53f79,42727,1733348980042 already deleted, retry=false 2024-12-04T21:50:21,880 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bb3046a53f79,42727,1733348980042 expired; onlineServers=1 2024-12-04T21:50:21,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:21,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:21,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:21,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:21,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:21,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:21,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:21,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:21,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42727-0x100a735cc2d0002, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:50:21,980 INFO [RS:1;bb3046a53f79:42727 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:50:21,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42727-0x100a735cc2d0002, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:50:21,980 INFO [RS:1;bb3046a53f79:42727 {}] regionserver.HRegionServer(1031): Exiting; stopping=bb3046a53f79,42727,1733348980042; zookeeper connection closed. 2024-12-04T21:50:21,981 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@32462869 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@32462869 2024-12-04T21:50:22,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:50:22,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:50:22,074 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-04T21:50:22,082 DEBUG [RS:0;bb3046a53f79:39097 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs 2024-12-04T21:50:22,082 INFO [RS:0;bb3046a53f79:39097 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C39097%2C1733348978686.meta:.meta(num 1733349016872) 2024-12-04T21:50:22,083 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:22,083 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:22,084 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:22,084 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:22,084 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:22,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741884_1068 (size=15140) 2024-12-04T21:50:22,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741884_1068 (size=15140) 2024-12-04T21:50:22,090 DEBUG [RS:0;bb3046a53f79:39097 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/oldWALs 2024-12-04T21:50:22,091 INFO [RS:0;bb3046a53f79:39097 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C39097%2C1733348978686:(num 1733349016616) 2024-12-04T21:50:22,091 DEBUG [RS:0;bb3046a53f79:39097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:22,091 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:50:22,091 INFO [RS:0;bb3046a53f79:39097 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:50:22,091 INFO [RS:0;bb3046a53f79:39097 {}] hbase.ChoreService(370): Chore service 
for: regionserver/bb3046a53f79:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T21:50:22,091 INFO [RS:0;bb3046a53f79:39097 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:50:22,091 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T21:50:22,091 INFO [RS:0;bb3046a53f79:39097 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39097 2024-12-04T21:50:22,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:50:22,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bb3046a53f79,39097,1733348978686 2024-12-04T21:50:22,093 INFO [RS:0;bb3046a53f79:39097 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:50:22,094 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bb3046a53f79,39097,1733348978686] 2024-12-04T21:50:22,095 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bb3046a53f79,39097,1733348978686 already deleted, retry=false 2024-12-04T21:50:22,095 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bb3046a53f79,39097,1733348978686 expired; onlineServers=0 2024-12-04T21:50:22,095 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bb3046a53f79,38967,1733348978643' ***** 2024-12-04T21:50:22,095 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T21:50:22,096 INFO [M:0;bb3046a53f79:38967 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:50:22,096 INFO [M:0;bb3046a53f79:38967 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:50:22,096 DEBUG [M:0;bb3046a53f79:38967 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T21:50:22,096 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T21:50:22,096 DEBUG [M:0;bb3046a53f79:38967 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T21:50:22,096 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733348979270 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733348979270,5,FailOnTimeoutGroup] 2024-12-04T21:50:22,096 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733348979269 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733348979269,5,FailOnTimeoutGroup] 2024-12-04T21:50:22,096 INFO [M:0;bb3046a53f79:38967 {}] hbase.ChoreService(370): Chore service for: master/bb3046a53f79:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T21:50:22,096 INFO [M:0;bb3046a53f79:38967 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:50:22,097 DEBUG [M:0;bb3046a53f79:38967 {}] master.HMaster(1795): Stopping service threads 2024-12-04T21:50:22,097 INFO [M:0;bb3046a53f79:38967 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T21:50:22,097 INFO [M:0;bb3046a53f79:38967 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:50:22,097 INFO [M:0;bb3046a53f79:38967 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T21:50:22,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T21:50:22,097 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-04T21:50:22,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:22,097 DEBUG [M:0;bb3046a53f79:38967 {}] zookeeper.ZKUtil(347): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T21:50:22,097 WARN [M:0;bb3046a53f79:38967 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T21:50:22,098 INFO [M:0;bb3046a53f79:38967 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/.lastflushedseqids 2024-12-04T21:50:22,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741904_1090 (size=130) 2024-12-04T21:50:22,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741904_1090 (size=130) 2024-12-04T21:50:22,107 INFO [M:0;bb3046a53f79:38967 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T21:50:22,107 INFO [M:0;bb3046a53f79:38967 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T21:50:22,107 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:50:22,108 INFO [M:0;bb3046a53f79:38967 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:22,108 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:22,108 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:50:22,108 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T21:50:22,108 INFO [M:0;bb3046a53f79:38967 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-12-04T21:50:22,122 DEBUG [M:0;bb3046a53f79:38967 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/782d67b274724d4cb213097b8f41b4e6 is 82, key is hbase:meta,,1/info:regioninfo/1733348979922/Put/seqid=0 2024-12-04T21:50:22,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741905_1091 (size=5672) 2024-12-04T21:50:22,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741905_1091 (size=5672) 2024-12-04T21:50:22,127 INFO [M:0;bb3046a53f79:38967 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/782d67b274724d4cb213097b8f41b4e6 2024-12-04T21:50:22,146 DEBUG [M:0;bb3046a53f79:38967 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8fe79bf0f0e747f5b019a533c9633bde is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733348980525/Put/seqid=0 2024-12-04T21:50:22,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741906_1092 (size=6255) 2024-12-04T21:50:22,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741906_1092 (size=6255) 2024-12-04T21:50:22,151 INFO [M:0;bb3046a53f79:38967 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8fe79bf0f0e747f5b019a533c9633bde 2024-12-04T21:50:22,156 INFO [M:0;bb3046a53f79:38967 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8fe79bf0f0e747f5b019a533c9633bde 2024-12-04T21:50:22,169 DEBUG [M:0;bb3046a53f79:38967 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9027257d7304431bac34a7e1e5420d8b is 69, key is bb3046a53f79,39097,1733348978686/rs:state/1733348979322/Put/seqid=0 2024-12-04T21:50:22,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741907_1093 (size=5224) 2024-12-04T21:50:22,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741907_1093 (size=5224) 2024-12-04T21:50:22,174 INFO [M:0;bb3046a53f79:38967 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), 
to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9027257d7304431bac34a7e1e5420d8b 2024-12-04T21:50:22,191 DEBUG [M:0;bb3046a53f79:38967 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/34573bb2b65b46218dbeee6b425397e6 is 52, key is load_balancer_on/state:d/1733348980020/Put/seqid=0 2024-12-04T21:50:22,194 INFO [RS:0;bb3046a53f79:39097 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:50:22,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:50:22,194 INFO [RS:0;bb3046a53f79:39097 {}] regionserver.HRegionServer(1031): Exiting; stopping=bb3046a53f79,39097,1733348978686; zookeeper connection closed. 2024-12-04T21:50:22,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39097-0x100a735cc2d0001, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:50:22,195 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3ab379ca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3ab379ca 2024-12-04T21:50:22,195 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-04T21:50:22,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741908_1094 (size=5056) 2024-12-04T21:50:22,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741908_1094 (size=5056) 2024-12-04T21:50:22,447 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T21:50:22,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:22,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:22,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:22,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:22,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:22,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:22,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:22,468 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:22,597 INFO [M:0;bb3046a53f79:38967 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/34573bb2b65b46218dbeee6b425397e6 2024-12-04T21:50:22,609 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/782d67b274724d4cb213097b8f41b4e6 as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/782d67b274724d4cb213097b8f41b4e6 2024-12-04T21:50:22,615 INFO [M:0;bb3046a53f79:38967 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/782d67b274724d4cb213097b8f41b4e6, entries=8, sequenceid=60, filesize=5.5 K 2024-12-04T21:50:22,616 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8fe79bf0f0e747f5b019a533c9633bde as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8fe79bf0f0e747f5b019a533c9633bde 2024-12-04T21:50:22,622 INFO [M:0;bb3046a53f79:38967 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8fe79bf0f0e747f5b019a533c9633bde 2024-12-04T21:50:22,622 INFO [M:0;bb3046a53f79:38967 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8fe79bf0f0e747f5b019a533c9633bde, entries=6, sequenceid=60, filesize=6.1 K 2024-12-04T21:50:22,623 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9027257d7304431bac34a7e1e5420d8b as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9027257d7304431bac34a7e1e5420d8b 2024-12-04T21:50:22,630 INFO [M:0;bb3046a53f79:38967 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9027257d7304431bac34a7e1e5420d8b, entries=2, sequenceid=60, filesize=5.1 K 2024-12-04T21:50:22,631 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/34573bb2b65b46218dbeee6b425397e6 
as hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/34573bb2b65b46218dbeee6b425397e6 2024-12-04T21:50:22,637 INFO [M:0;bb3046a53f79:38967 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/34573bb2b65b46218dbeee6b425397e6, entries=1, sequenceid=60, filesize=4.9 K 2024-12-04T21:50:22,639 INFO [M:0;bb3046a53f79:38967 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 530ms, sequenceid=60, compaction requested=false 2024-12-04T21:50:22,640 INFO [M:0;bb3046a53f79:38967 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:22,640 DEBUG [M:0;bb3046a53f79:38967 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733349022107Disabling compacts and flushes for region at 1733349022107Disabling writes for close at 1733349022108 (+1 ms)Obtaining lock to block concurrent updates at 1733349022108Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733349022108Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1733349022108Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733349022109 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733349022109Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733349022121 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733349022122 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733349022132 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733349022145 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733349022145Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733349022156 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733349022169 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733349022169Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733349022178 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733349022190 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733349022190Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1273afae: reopening flushed file at 1733349022608 (+418 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@211c1957: reopening flushed file at 1733349022615 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@172a06a5: reopening flushed file at 1733349022622 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@145aee2e: reopening flushed file at 1733349022630 (+8 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 530ms, sequenceid=60, compaction requested=false at 1733349022639 (+9 ms)Writing region close event to WAL at 1733349022640 (+1 ms)Closed at 1733349022640 2024-12-04T21:50:22,641 INFO 
[sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:22,641 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:22,641 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:22,641 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:22,641 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:22,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38171 is added to blk_1073741881_1064 (size=1045)
2024-12-04T21:50:22,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741881_1064 (size=1045)
2024-12-04T21:50:22,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T21:50:22,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T21:50:23,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
2024-12-04T21:50:23,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
2024-12-04T21:50:24,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741825_1001 (size=7)
2024-12-04T21:50:24,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741836_1012 (size=76)
2024-12-04T21:50:24,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-12-04T21:50:24,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-04T21:50:24,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-04T21:50:24,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-04T21:50:24,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
2024-12-04T21:50:24,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
2024-12-04T21:50:25,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741832_1008 (size=32)
2024-12-04T21:50:25,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741828_1004 (size=1189)
2024-12-04T21:50:25,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
2024-12-04T21:50:25,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
2024-12-04T21:50:26,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
2024-12-04T21:50:26,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
2024-12-04T21:50:27,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34003 is added to blk_1073741826_1002 (size=42)
2024-12-04T21:50:27,641 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-04T21:50:27,642 INFO [M:0;bb3046a53f79:38967 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-04T21:50:27,642 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-04T21:50:27,643 INFO [M:0;bb3046a53f79:38967 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38967
2024-12-04T21:50:27,643 INFO [M:0;bb3046a53f79:38967 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-04T21:50:27,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T21:50:27,746 INFO [M:0;bb3046a53f79:38967 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-04T21:50:27,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38967-0x100a735cc2d0000, quorum=127.0.0.1:51562, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T21:50:27,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b3c44a7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-04T21:50:27,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6682cb77{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T21:50:27,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T21:50:27,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aae8f75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T21:50:27,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66f331b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,STOPPED}
2024-12-04T21:50:27,758 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-04T21:50:27,758 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-04T21:50:27,758 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-04T21:50:27,758 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1821304615-172.17.0.2-1733348978047 (Datanode Uuid d1e53198-d7bc-4599-888e-c6fe62466d4c) service to localhost/127.0.0.1:38693
2024-12-04T21:50:27,759 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data3/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T21:50:27,759 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data4/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T21:50:27,759 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-04T21:50:27,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52bf7bc9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-04T21:50:27,761 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@309070cf{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T21:50:27,761 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T21:50:27,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cb49758{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T21:50:27,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47e3d6b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,STOPPED}
2024-12-04T21:50:27,762 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-04T21:50:27,762 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-04T21:50:27,762 WARN [BP-1821304615-172.17.0.2-1733348978047 heartbeating to localhost/127.0.0.1:38693 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1821304615-172.17.0.2-1733348978047 (Datanode Uuid 7223e6c5-6700-434a-ad9b-af10c996f444) service to localhost/127.0.0.1:38693
2024-12-04T21:50:27,762 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-04T21:50:27,763 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data9/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T21:50:27,763 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/cluster_9a3a30b5-6077-1d44-53dd-5933bb32dc48/data/data10/current/BP-1821304615-172.17.0.2-1733348978047 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T21:50:27,763 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-04T21:50:27,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b27d212{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-04T21:50:27,768 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@92a3852{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T21:50:27,768 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T21:50:27,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@498b2e6c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T21:50:27,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fe50a4a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir/,STOPPED}
2024-12-04T21:50:27,777 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-04T21:50:27,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-04T21:50:27,812 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 81)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39903 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:38693 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38693 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38693 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f8914bf55b8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f8914bf55b8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38693 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39903 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38693 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:38693 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38693 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38693 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f8914bf55b8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38693 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38693 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38693 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=434 (was 405) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=121 (was 210), ProcessCount=11 (was 11), AvailableMemoryMB=2530 (was 3345) 2024-12-04T21:50:27,818 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=434, MaxFileDescriptor=1048576, SystemLoadAverage=121, ProcessCount=11, AvailableMemoryMB=2529 2024-12-04T21:50:27,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T21:50:27,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.log.dir so I do NOT create it in target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef 2024-12-04T21:50:27,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/dc9f77ce-f8e6-dc9e-4d74-c0f0fd08180a/hadoop.tmp.dir so I do NOT create it in target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef 2024-12-04T21:50:27,819 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff, deleteOnExit=true 2024-12-04T21:50:27,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T21:50:27,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/test.cache.data in system properties and HBase conf 2024-12-04T21:50:27,819 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T21:50:27,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir in system properties and HBase conf 2024-12-04T21:50:27,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T21:50:27,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T21:50:27,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T21:50:27,819 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-04T21:50:27,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:50:27,819 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/nfs.dump.dir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/java.io.tmpdir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T21:50:27,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T21:50:27,831 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:50:27,875 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:50:27,879 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:50:27,880 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:50:27,880 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:50:27,880 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:50:27,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:27,881 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:50:27,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@521c98fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:50:27,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5771e35b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:50:27,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:27,973 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16b67381{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/java.io.tmpdir/jetty-localhost-43131-hadoop-hdfs-3_4_1-tests_jar-_-any-3601599695895950662/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:50:27,973 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6274e59b{HTTP/1.1, (http/1.1)}{localhost:43131} 2024-12-04T21:50:27,973 INFO [Time-limited test {}] server.Server(415): Started @148903ms 2024-12-04T21:50:27,984 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:50:28,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:50:28,036 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:50:28,037 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:50:28,037 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:50:28,037 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:50:28,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e543aab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:50:28,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65b2da55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:50:28,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4930728e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/java.io.tmpdir/jetty-localhost-33365-hadoop-hdfs-3_4_1-tests_jar-_-any-3100458844243857225/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:28,130 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f91f141{HTTP/1.1, (http/1.1)}{localhost:33365} 2024-12-04T21:50:28,131 INFO [Time-limited test {}] server.Server(415): Started @149061ms 2024-12-04T21:50:28,132 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T21:50:28,156 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:50:28,159 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:50:28,160 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:50:28,160 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:50:28,160 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:50:28,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f4d9d98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:50:28,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@261c7248{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:50:28,187 WARN [Thread-1186 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data1/current/BP-69408873-172.17.0.2-1733349027842/current, will proceed with Du for space computation calculation, 2024-12-04T21:50:28,187 WARN [Thread-1187 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data2/current/BP-69408873-172.17.0.2-1733349027842/current, will proceed with Du for space computation calculation, 2024-12-04T21:50:28,200 WARN [Thread-1165 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:50:28,202 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf0b24201559b7b5 with lease ID 0xb0b5040f23bbd201: Processing first storage report for DS-5dd181de-54fa-48ad-b319-1630d6afba99 from datanode DatanodeRegistration(127.0.0.1:33911, datanodeUuid=71bb6dfc-d599-4394-a3b5-b55368101870, infoPort=34777, infoSecurePort=0, ipcPort=38207, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842) 2024-12-04T21:50:28,202 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf0b24201559b7b5 with lease ID 0xb0b5040f23bbd201: from storage DS-5dd181de-54fa-48ad-b319-1630d6afba99 node DatanodeRegistration(127.0.0.1:33911, datanodeUuid=71bb6dfc-d599-4394-a3b5-b55368101870, infoPort=34777, infoSecurePort=0, ipcPort=38207, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:28,202 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf0b24201559b7b5 with lease ID 0xb0b5040f23bbd201: Processing first storage report for DS-4e4444ce-aed3-40c4-8de6-2caa13fe1dde from datanode DatanodeRegistration(127.0.0.1:33911, datanodeUuid=71bb6dfc-d599-4394-a3b5-b55368101870, infoPort=34777, infoSecurePort=0, ipcPort=38207, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842) 2024-12-04T21:50:28,202 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf0b24201559b7b5 with lease ID 0xb0b5040f23bbd201: from storage DS-4e4444ce-aed3-40c4-8de6-2caa13fe1dde node DatanodeRegistration(127.0.0.1:33911, datanodeUuid=71bb6dfc-d599-4394-a3b5-b55368101870, infoPort=34777, infoSecurePort=0, ipcPort=38207, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:28,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26c31391{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/java.io.tmpdir/jetty-localhost-44023-hadoop-hdfs-3_4_1-tests_jar-_-any-1558809558410720955/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:28,254 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6150aad0{HTTP/1.1, (http/1.1)}{localhost:44023} 2024-12-04T21:50:28,255 INFO [Time-limited test {}] server.Server(415): Started @149184ms 2024-12-04T21:50:28,256 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T21:50:28,310 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data3/current/BP-69408873-172.17.0.2-1733349027842/current, will proceed with Du for space computation calculation, 2024-12-04T21:50:28,310 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data4/current/BP-69408873-172.17.0.2-1733349027842/current, will proceed with Du for space computation calculation, 2024-12-04T21:50:28,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:28,324 WARN [Thread-1201 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:50:28,326 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9bb7259d2053fb with lease ID 0xb0b5040f23bbd202: Processing first storage report for DS-125337f2-c826-494d-a68f-a6da5122d160 from datanode DatanodeRegistration(127.0.0.1:34505, datanodeUuid=76d9aac4-f00f-46a2-811b-2c517873e067, infoPort=45847, infoSecurePort=0, ipcPort=44943, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842) 2024-12-04T21:50:28,326 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9bb7259d2053fb with lease ID 0xb0b5040f23bbd202: from storage DS-125337f2-c826-494d-a68f-a6da5122d160 node DatanodeRegistration(127.0.0.1:34505, datanodeUuid=76d9aac4-f00f-46a2-811b-2c517873e067, infoPort=45847, infoSecurePort=0, ipcPort=44943, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:28,326 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9bb7259d2053fb with lease ID 0xb0b5040f23bbd202: Processing first storage report for DS-9e94cea7-f9de-438b-8dfa-254740c8acfe from datanode DatanodeRegistration(127.0.0.1:34505, datanodeUuid=76d9aac4-f00f-46a2-811b-2c517873e067, infoPort=45847, infoSecurePort=0, ipcPort=44943, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842) 2024-12-04T21:50:28,326 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9bb7259d2053fb with lease ID 0xb0b5040f23bbd202: from storage DS-9e94cea7-f9de-438b-8dfa-254740c8acfe node DatanodeRegistration(127.0.0.1:34505, datanodeUuid=76d9aac4-f00f-46a2-811b-2c517873e067, infoPort=45847, infoSecurePort=0, ipcPort=44943, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:28,379 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef 2024-12-04T21:50:28,383 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/zookeeper_0, clientPort=62257, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T21:50:28,385 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62257 2024-12-04T21:50:28,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:50:28,386 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:50:28,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34505 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:50:28,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33911 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:50:28,395 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62 with version=8 2024-12-04T21:50:28,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/hbase-staging 2024-12-04T21:50:28,397 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:50:28,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:50:28,398 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:50:28,398 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:50:28,398 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:50:28,398 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:50:28,398 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T21:50:28,398 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:50:28,398 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43273 2024-12-04T21:50:28,400 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43273 connecting to ZooKeeper ensemble=127.0.0.1:62257 2024-12-04T21:50:28,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:432730x0, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:50:28,403 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43273-0x100a7368e8c0000 connected 2024-12-04T21:50:28,419 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:50:28,420 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:50:28,422 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:50:28,422 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62, hbase.cluster.distributed=false 2024-12-04T21:50:28,423 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:50:28,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43273 2024-12-04T21:50:28,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43273 2024-12-04T21:50:28,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43273 2024-12-04T21:50:28,426 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43273 2024-12-04T21:50:28,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43273 2024-12-04T21:50:28,439 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:50:28,439 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:50:28,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:50:28,440 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:50:28,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:50:28,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:50:28,440 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T21:50:28,440 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:50:28,441 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46493 2024-12-04T21:50:28,442 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46493 connecting to ZooKeeper ensemble=127.0.0.1:62257 2024-12-04T21:50:28,443 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:50:28,445 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:50:28,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:464930x0, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:50:28,449 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:464930x0, quorum=127.0.0.1:62257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:50:28,449 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46493-0x100a7368e8c0001 connected 2024-12-04T21:50:28,449 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T21:50:28,450 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T21:50:28,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T21:50:28,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:50:28,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46493 2024-12-04T21:50:28,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46493 2024-12-04T21:50:28,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46493 2024-12-04T21:50:28,457 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46493 2024-12-04T21:50:28,457 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46493 2024-12-04T21:50:28,467 DEBUG [M:0;bb3046a53f79:43273 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bb3046a53f79:43273 2024-12-04T21:50:28,467 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bb3046a53f79,43273,1733349028397 2024-12-04T21:50:28,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:50:28,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:50:28,469 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/bb3046a53f79,43273,1733349028397 2024-12-04T21:50:28,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:28,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T21:50:28,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:28,470 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T21:50:28,470 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bb3046a53f79,43273,1733349028397 from backup master directory 2024-12-04T21:50:28,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bb3046a53f79,43273,1733349028397 2024-12-04T21:50:28,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:50:28,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:50:28,471 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T21:50:28,471 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bb3046a53f79,43273,1733349028397 2024-12-04T21:50:28,476 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/hbase.id] with ID: 2f7d01d4-c5fe-4e6b-bbb8-4bbcffac06d7 2024-12-04T21:50:28,477 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/.tmp/hbase.id 2024-12-04T21:50:28,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33911 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:50:28,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34505 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:50:28,482 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/.tmp/hbase.id]:[hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/hbase.id] 2024-12-04T21:50:28,492 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:50:28,493 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T21:50:28,494 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-04T21:50:28,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:28,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:28,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33911 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:50:28,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34505 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:50:28,502 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:50:28,502 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T21:50:28,503 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:50:28,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34505 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:50:28,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33911 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:50:28,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:28,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:28,910 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store 2024-12-04T21:50:28,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34505 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:50:28,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33911 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:50:28,917 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:50:28,917 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:50:28,917 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T21:50:28,917 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:28,917 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:50:28,917 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:28,917 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:28,917 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733349028917Disabling compacts and flushes for region at 1733349028917Disabling writes for close at 1733349028917Writing region close event to WAL at 1733349028917Closed at 1733349028917 2024-12-04T21:50:28,918 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/.initializing 2024-12-04T21:50:28,918 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397 2024-12-04T21:50:28,921 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C43273%2C1733349028397, suffix=, logDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397, archiveDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/oldWALs, maxLogs=10 2024-12-04T21:50:28,921 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C43273%2C1733349028397.1733349028921 2024-12-04T21:50:28,926 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349028921 2024-12-04T21:50:28,927 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34777:34777),(127.0.0.1/127.0.0.1:45847:45847)] 2024-12-04T21:50:28,927 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:50:28,927 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:50:28,927 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,927 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] 
regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,929 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T21:50:28,930 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:28,931 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:50:28,931 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,932 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T21:50:28,932 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:28,932 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:50:28,932 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,933 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T21:50:28,933 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:28,934 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:50:28,934 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,935 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T21:50:28,935 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:28,935 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:50:28,935 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,936 DEBUG 
[master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,936 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,938 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,938 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,938 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T21:50:28,939 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:50:28,941 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:50:28,942 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=739284, jitterRate=-0.059952810406684875}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T21:50:28,942 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733349028928Initializing all the Stores at 1733349028928Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349028928Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349028929 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349028929Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349028929Cleaning up temporary data from old regions at 1733349028938 (+9 ms)Region opened successfully at 1733349028942 (+4 ms) 2024-12-04T21:50:28,944 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T21:50:28,947 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6865cdde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:50:28,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T21:50:28,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T21:50:28,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T21:50:28,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T21:50:28,948 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T21:50:28,949 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T21:50:28,949 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T21:50:28,951 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-04T21:50:28,952 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T21:50:28,953 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T21:50:28,953 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T21:50:28,954 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T21:50:28,955 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T21:50:28,955 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T21:50:28,956 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T21:50:28,957 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T21:50:28,957 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T21:50:28,958 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T21:50:28,960 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T21:50:28,961 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T21:50:28,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:50:28,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:50:28,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:28,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-04T21:50:28,962 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bb3046a53f79,43273,1733349028397, sessionid=0x100a7368e8c0000, setting cluster-up flag (Was=false) 2024-12-04T21:50:28,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:28,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:28,966 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T21:50:28,967 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,43273,1733349028397 2024-12-04T21:50:28,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:28,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:28,971 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T21:50:28,972 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,43273,1733349028397 2024-12-04T21:50:28,973 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T21:50:28,974 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T21:50:28,974 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T21:50:28,975 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-04T21:50:28,975 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bb3046a53f79,43273,1733349028397 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T21:50:28,976 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:50:28,976 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:50:28,976 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:50:28,976 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:50:28,976 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bb3046a53f79:0, corePoolSize=10, maxPoolSize=10 2024-12-04T21:50:28,976 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:28,976 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:50:28,976 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:28,981 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733349058981 2024-12-04T21:50:28,981 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T21:50:28,982 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T21:50:28,982 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T21:50:28,982 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T21:50:28,982 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T21:50:28,982 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T21:50:28,982 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:28,982 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:50:28,982 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T21:50:28,982 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T21:50:28,983 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T21:50:28,983 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T21:50:28,983 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T21:50:28,983 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T21:50:28,984 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:28,984 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T21:50:28,985 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349028983,5,FailOnTimeoutGroup] 2024-12-04T21:50:28,988 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349028985,5,FailOnTimeoutGroup] 2024-12-04T21:50:28,988 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:28,989 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T21:50:28,989 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:28,989 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:28,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34505 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:50:28,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33911 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:50:28,993 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T21:50:28,994 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62 2024-12-04T21:50:29,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34505 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:50:29,003 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33911 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:50:29,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:50:29,005 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:50:29,006 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:50:29,006 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:29,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:50:29,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:50:29,008 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:50:29,008 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:29,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:50:29,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:50:29,010 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:50:29,010 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:29,010 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:50:29,010 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:50:29,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:50:29,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:29,012 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:50:29,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:50:29,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740 2024-12-04T21:50:29,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740 2024-12-04T21:50:29,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:50:29,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:50:29,015 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T21:50:29,016 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:50:29,018 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:50:29,018 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783991, jitterRate=-0.0031041353940963745}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:50:29,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733349029003Initializing all the Stores at 1733349029004 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349029004Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349029005 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349029005Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349029005Cleaning up temporary data from old regions at 1733349029014 (+9 ms)Region opened successfully at 1733349029019 (+5 ms) 2024-12-04T21:50:29,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:50:29,019 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:50:29,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:50:29,019 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:50:29,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:50:29,019 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:50:29,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733349029019Disabling compacts and flushes for region at 1733349029019Disabling writes for close at 1733349029019Writing region close event to WAL at 1733349029019Closed at 1733349029019 2024-12-04T21:50:29,020 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:50:29,020 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T21:50:29,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T21:50:29,021 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:50:29,022 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T21:50:29,061 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(746): ClusterId : 2f7d01d4-c5fe-4e6b-bbb8-4bbcffac06d7 2024-12-04T21:50:29,061 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T21:50:29,063 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T21:50:29,063 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T21:50:29,066 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T21:50:29,066 DEBUG [RS:0;bb3046a53f79:46493 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2078c698, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:50:29,079 DEBUG [RS:0;bb3046a53f79:46493 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bb3046a53f79:46493 2024-12-04T21:50:29,079 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T21:50:29,079 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T21:50:29,079 DEBUG [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T21:50:29,080 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(2659): reportForDuty to master=bb3046a53f79,43273,1733349028397 with port=46493, startcode=1733349028439 2024-12-04T21:50:29,080 DEBUG [RS:0;bb3046a53f79:46493 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T21:50:29,082 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55385, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T21:50:29,082 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43273 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,082 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43273 {}] master.ServerManager(517): Registering regionserver=bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,084 DEBUG [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62 2024-12-04T21:50:29,084 DEBUG [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32793 2024-12-04T21:50:29,084 DEBUG [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T21:50:29,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:50:29,085 DEBUG [RS:0;bb3046a53f79:46493 {}] zookeeper.ZKUtil(111): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,086 WARN [RS:0;bb3046a53f79:46493 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T21:50:29,086 INFO [RS:0;bb3046a53f79:46493 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:50:29,086 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bb3046a53f79,46493,1733349028439] 2024-12-04T21:50:29,086 DEBUG [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,089 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T21:50:29,091 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T21:50:29,091 INFO [RS:0;bb3046a53f79:46493 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T21:50:29,091 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-04T21:50:29,091 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T21:50:29,092 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T21:50:29,092 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,092 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,092 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:50:29,093 DEBUG [RS:0;bb3046a53f79:46493 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:50:29,096 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-04T21:50:29,096 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,096 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,096 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,096 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,096 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46493,1733349028439-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:50:29,110 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T21:50:29,110 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46493,1733349028439-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,110 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,110 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.Replication(171): bb3046a53f79,46493,1733349028439 started 2024-12-04T21:50:29,121 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,121 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(1482): Serving as bb3046a53f79,46493,1733349028439, RpcServer on bb3046a53f79/172.17.0.2:46493, sessionid=0x100a7368e8c0001 2024-12-04T21:50:29,121 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T21:50:29,121 DEBUG [RS:0;bb3046a53f79:46493 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,121 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,46493,1733349028439' 2024-12-04T21:50:29,121 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T21:50:29,122 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T21:50:29,122 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T21:50:29,122 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T21:50:29,122 DEBUG [RS:0;bb3046a53f79:46493 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,122 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,46493,1733349028439' 2024-12-04T21:50:29,123 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T21:50:29,123 DEBUG 
[RS:0;bb3046a53f79:46493 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T21:50:29,123 DEBUG [RS:0;bb3046a53f79:46493 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T21:50:29,123 INFO [RS:0;bb3046a53f79:46493 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T21:50:29,123 INFO [RS:0;bb3046a53f79:46493 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T21:50:29,173 WARN [bb3046a53f79:43273 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T21:50:29,227 INFO [RS:0;bb3046a53f79:46493 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C46493%2C1733349028439, suffix=, logDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439, archiveDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/oldWALs, maxLogs=32 2024-12-04T21:50:29,228 INFO [RS:0;bb3046a53f79:46493 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46493%2C1733349028439.1733349029228 2024-12-04T21:50:29,240 INFO [RS:0;bb3046a53f79:46493 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 2024-12-04T21:50:29,241 DEBUG [RS:0;bb3046a53f79:46493 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45847:45847),(127.0.0.1/127.0.0.1:34777:34777)] 2024-12-04T21:50:29,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:29,423 DEBUG [bb3046a53f79:43273 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T21:50:29,424 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,427 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,46493,1733349028439, state=OPENING 2024-12-04T21:50:29,430 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T21:50:29,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:29,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:29,433 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:50:29,433 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:50:29,433 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:50:29,434 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,46493,1733349028439}] 2024-12-04T21:50:29,590 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T21:50:29,594 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48621, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T21:50:29,599 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T21:50:29,599 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:50:29,601 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C46493%2C1733349028439.meta, suffix=.meta, 
logDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439, archiveDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/oldWALs, maxLogs=32 2024-12-04T21:50:29,602 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta 2024-12-04T21:50:29,608 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta 2024-12-04T21:50:29,609 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34777:34777),(127.0.0.1/127.0.0.1:45847:45847)] 2024-12-04T21:50:29,610 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:50:29,610 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T21:50:29,610 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T21:50:29,610 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-04T21:50:29,610 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T21:50:29,611 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:50:29,611 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T21:50:29,611 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T21:50:29,612 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:50:29,613 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:50:29,613 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:29,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:50:29,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:50:29,615 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:50:29,615 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:29,615 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:50:29,615 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:50:29,616 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:50:29,616 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:29,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:50:29,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:50:29,618 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:50:29,618 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:29,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
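The HStore lines above (DefaultMemStore, encoding=ROW_INDEX_V1, no compression) correspond to the column-family attributes listed later in the region open journal (ROWCOL bloom filter, in-memory, 8 KB blocks for 'info'). A minimal sketch of how such a family descriptor is built with the public client API; this assumes the standard ColumnFamilyDescriptorBuilder and is an illustration, not code from this test run:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    class MetaInfoFamilySketch {
        // Builds a family configured like the 'info' family described in the open journal below.
        static ColumnFamilyDescriptor buildInfoFamily() {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // encoding=ROW_INDEX_V1 in the HStore line
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .build();
        }
    }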
2024-12-04T21:50:29,618 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:50:29,619 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740 2024-12-04T21:50:29,621 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740 2024-12-04T21:50:29,622 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:50:29,622 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:50:29,623 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T21:50:29,625 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:50:29,627 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797580, jitterRate=0.014176055788993835}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:50:29,627 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T21:50:29,628 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733349029611Writing region info on filesystem at 1733349029611Initializing all the Stores at 1733349029612 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349029612Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349029612Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349029612Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349029612Cleaning up temporary data from old regions at 1733349029623 (+11 ms)Running coprocessor post-open hooks at 1733349029627 (+4 ms)Region opened successfully at 1733349029628 (+1 ms) 2024-12-04T21:50:29,629 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733349029589 2024-12-04T21:50:29,632 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T21:50:29,632 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T21:50:29,633 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,635 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,46493,1733349028439, state=OPEN 2024-12-04T21:50:29,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:50:29,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:50:29,637 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,637 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:50:29,637 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:50:29,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T21:50:29,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,46493,1733349028439 in 204 msec 2024-12-04T21:50:29,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T21:50:29,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 621 msec 2024-12-04T21:50:29,645 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:50:29,645 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T21:50:29,647 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:50:29,647 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,46493,1733349028439, seqNum=-1] 2024-12-04T21:50:29,648 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:50:29,649 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35057, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:50:29,657 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 681 msec 2024-12-04T21:50:29,657 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733349029657, completionTime=-1 2024-12-04T21:50:29,657 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T21:50:29,657 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T21:50:29,660 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T21:50:29,660 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733349089660 2024-12-04T21:50:29,660 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733349149660 2024-12-04T21:50:29,660 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-04T21:50:29,660 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,43273,1733349028397-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,660 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,43273,1733349028397-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,661 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,43273,1733349028397-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,661 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bb3046a53f79:43273, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:50:29,661 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,661 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T21:50:29,663 DEBUG [master/bb3046a53f79:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T21:50:29,664 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.193sec 2024-12-04T21:50:29,664 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T21:50:29,665 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T21:50:29,665 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T21:50:29,665 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T21:50:29,665 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T21:50:29,665 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,43273,1733349028397-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:50:29,665 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,43273,1733349028397-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T21:50:29,668 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T21:50:29,668 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T21:50:29,668 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,43273,1733349028397-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
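Every entry of the form "Chore ScheduledChore name=..., period=..., unit=... is enabled." comes from a chore being registered with a ChoreService. A minimal sketch of that pattern, assuming the public ScheduledChore/ChoreService API; the chore name and the 1000 ms period (borrowed from the MemstoreFlusherChore line earlier) are illustrative only:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            // Runs every 1000 ms, like the MemstoreFlusherChore enabled earlier in this log.
            ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
                @Override protected void chore() {
                    System.out.println("chore tick");
                }
            };
            ChoreService service = new ChoreService("example");
            service.scheduleChore(chore); // the "... is enabled." lines above are logged by ChoreService when a chore is scheduled
            Thread.sleep(3000);
            service.shutdown();
        }
    }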
2024-12-04T21:50:29,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33efec7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:50:29,761 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bb3046a53f79,43273,-1 for getting cluster id 2024-12-04T21:50:29,762 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T21:50:29,765 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2f7d01d4-c5fe-4e6b-bbb8-4bbcffac06d7' 2024-12-04T21:50:29,766 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T21:50:29,766 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2f7d01d4-c5fe-4e6b-bbb8-4bbcffac06d7" 2024-12-04T21:50:29,767 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4444d8d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:50:29,767 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bb3046a53f79,43273,-1] 2024-12-04T21:50:29,768 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T21:50:29,768 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:29,770 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35188, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T21:50:29,771 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69ae8bd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:50:29,771 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:50:29,773 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,46493,1733349028439, seqNum=-1] 2024-12-04T21:50:29,773 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:50:29,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:50:29,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bb3046a53f79,43273,1733349028397 2024-12-04T21:50:29,777 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:50:29,780 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T21:50:29,780 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-04T21:50:29,780 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-04T21:50:29,780 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T21:50:29,782 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is bb3046a53f79,43273,1733349028397 2024-12-04T21:50:29,782 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@77f4eae 2024-12-04T21:50:29,782 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T21:50:29,784 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35196, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T21:50:29,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43273 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T21:50:29,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43273 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
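The two TableDescriptorChecker warnings above fire because the table about to be created uses a very small MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes), presumably chosen by the test to force frequent flushing and splitting. A minimal sketch of how a descriptor with those values is built and submitted through the Admin API; only the numbers and table name come from the log, the surrounding code is an assumption:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    class TinyFlushTableSketch {
        static void createTinyTable(Admin admin) throws java.io.IOException {
            TableDescriptorBuilder builder = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
                .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning above
                .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning above
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"));
            admin.createTable(builder.build());
        }
    }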
2024-12-04T21:50:29,785 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43273 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:50:29,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43273 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-04T21:50:29,790 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T21:50:29,790 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:29,790 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43273 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-04T21:50:29,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:50:29,791 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T21:50:29,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34505 is added to blk_1073741835_1011 (size=395) 2024-12-04T21:50:29,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33911 is added to blk_1073741835_1011 (size=395) 2024-12-04T21:50:29,800 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 85aba77a8e5747b7cf1d2c755905cb27, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62 2024-12-04T21:50:29,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34505 is added to blk_1073741836_1012 (size=78) 2024-12-04T21:50:29,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33911 is added to blk_1073741836_1012 (size=78) 2024-12-04T21:50:29,807 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:50:29,807 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 85aba77a8e5747b7cf1d2c755905cb27, disabling compactions & flushes 2024-12-04T21:50:29,807 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:29,807 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:29,807 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. after waiting 0 ms 2024-12-04T21:50:29,807 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:29,807 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:29,807 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 85aba77a8e5747b7cf1d2c755905cb27: Waiting for close lock at 1733349029807Disabling compacts and flushes for region at 1733349029807Disabling writes for close at 1733349029807Writing region close event to WAL at 1733349029807Closed at 1733349029807 2024-12-04T21:50:29,809 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T21:50:29,809 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733349029809"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733349029809"}]},"ts":"1733349029809"} 2024-12-04T21:50:29,812 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
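The MetaTableAccessor Put above writes the new region's 'regioninfo' and 'state' cells into hbase:meta. Those cells can be read back with an ordinary client scan; a minimal sketch assuming the standard Table/Scan API (the 'info' family and 'state' qualifier names are taken from the Put shown above):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class MetaScanSketch {
        static void printRegionStates(Connection conn) throws java.io.IOException {
            try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
                 ResultScanner scanner = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
                for (Result r : scanner) {
                    byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
                    System.out.println(Bytes.toString(r.getRow())
                        + " state=" + (state == null ? "n/a" : Bytes.toString(state)));
                }
            }
        }
    }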
2024-12-04T21:50:29,813 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T21:50:29,813 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733349029813"}]},"ts":"1733349029813"} 2024-12-04T21:50:29,816 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-04T21:50:29,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=85aba77a8e5747b7cf1d2c755905cb27, ASSIGN}] 2024-12-04T21:50:29,817 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=85aba77a8e5747b7cf1d2c755905cb27, ASSIGN 2024-12-04T21:50:29,818 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=85aba77a8e5747b7cf1d2c755905cb27, ASSIGN; state=OFFLINE, location=bb3046a53f79,46493,1733349028439; forceNewPlan=false, retain=false 2024-12-04T21:50:29,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:29,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:29,969 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=85aba77a8e5747b7cf1d2c755905cb27, regionState=OPENING, regionLocation=bb3046a53f79,46493,1733349028439 2024-12-04T21:50:29,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=85aba77a8e5747b7cf1d2c755905cb27, ASSIGN because future has completed 2024-12-04T21:50:29,976 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 85aba77a8e5747b7cf1d2c755905cb27, server=bb3046a53f79,46493,1733349028439}] 2024-12-04T21:50:30,144 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:30,144 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 85aba77a8e5747b7cf1d2c755905cb27, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:50:30,144 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,144 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:50:30,145 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,145 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,146 INFO [StoreOpener-85aba77a8e5747b7cf1d2c755905cb27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,148 INFO [StoreOpener-85aba77a8e5747b7cf1d2c755905cb27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 85aba77a8e5747b7cf1d2c755905cb27 columnFamilyName info 2024-12-04T21:50:30,148 DEBUG [StoreOpener-85aba77a8e5747b7cf1d2c755905cb27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:50:30,148 INFO [StoreOpener-85aba77a8e5747b7cf1d2c755905cb27-1 {}] regionserver.HStore(327): Store=85aba77a8e5747b7cf1d2c755905cb27/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:50:30,148 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,149 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/default/TestLogRolling-testLogRollOnPipelineRestart/85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,149 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/default/TestLogRolling-testLogRollOnPipelineRestart/85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,150 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,150 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,152 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,154 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/default/TestLogRolling-testLogRollOnPipelineRestart/85aba77a8e5747b7cf1d2c755905cb27/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:50:30,155 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 85aba77a8e5747b7cf1d2c755905cb27; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838739, jitterRate=0.06651186943054199}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T21:50:30,155 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:30,156 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 85aba77a8e5747b7cf1d2c755905cb27: Running coprocessor pre-open hook at 1733349030145Writing region info on filesystem at 1733349030145Initializing all the Stores at 1733349030146 (+1 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349030146Cleaning up temporary data from old regions at 1733349030150 (+4 ms)Running coprocessor post-open hooks at 1733349030155 (+5 ms)Region opened successfully at 1733349030156 (+1 ms) 2024-12-04T21:50:30,157 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27., pid=6, masterSystemTime=1733349030133 2024-12-04T21:50:30,159 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:30,159 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:30,160 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=85aba77a8e5747b7cf1d2c755905cb27, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,46493,1733349028439 2024-12-04T21:50:30,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 85aba77a8e5747b7cf1d2c755905cb27, server=bb3046a53f79,46493,1733349028439 because future has completed 2024-12-04T21:50:30,163 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43273 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=bb3046a53f79,46493,1733349028439, table=TestLogRolling-testLogRollOnPipelineRestart, region=85aba77a8e5747b7cf1d2c755905cb27. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
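With the region for TestLogRolling-testLogRollOnPipelineRestart now OPEN on bb3046a53f79,46493, a client can write to the table; under default durability each mutation is appended to the region server WAL created earlier before being acknowledged, which is the path a log-rolling test exercises. A minimal client-side sketch, assuming the standard Connection/Table API; the row and values are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class WriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(
                     TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
                Put put = new Put(Bytes.toBytes("row1"));
                // 'info' is the only column family defined for this table (see the create above).
                put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value1"));
                table.put(put);
            }
        }
    }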
2024-12-04T21:50:30,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T21:50:30,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 85aba77a8e5747b7cf1d2c755905cb27, server=bb3046a53f79,46493,1733349028439 in 188 msec 2024-12-04T21:50:30,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T21:50:30,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=85aba77a8e5747b7cf1d2c755905cb27, ASSIGN in 351 msec 2024-12-04T21:50:30,171 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T21:50:30,171 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733349030171"}]},"ts":"1733349030171"} 2024-12-04T21:50:30,173 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-04T21:50:30,174 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T21:50:30,176 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 388 msec 2024-12-04T21:50:30,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:30,337 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T21:50:30,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,360 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:30,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:30,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:31,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:31,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:31,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:32,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:32,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:32,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:33,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:33,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:33,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:34,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:34,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T21:50:34,832 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-04T21:50:34,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-04T21:50:34,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-04T21:50:34,836 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:50:34,836 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-04T21:50:34,836 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-04T21:50:34,836 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-04T21:50:34,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:34,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:35,090 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T21:50:35,091 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-04T21:50:35,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:35,648 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T21:50:35,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,670 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,670 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:50:35,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:35,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:36,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:36,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:36,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:37,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:37,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:37,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:38,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:38,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:38,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:39,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:39,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43273 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:50:39,824 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-04T21:50:39,824 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-04T21:50:39,837 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-04T21:50:39,837 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:39,840 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27., hostname=bb3046a53f79,46493,1733349028439, seqNum=2] 2024-12-04T21:50:39,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:39,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:40,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:40,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:40,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:41,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:41,843 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 2024-12-04T21:50:41,845 WARN [ResponseProcessor for block BP-69408873-172.17.0.2-1733349027842:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-69408873-172.17.0.2-1733349027842:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:41,845 WARN [ResponseProcessor for block BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:34505,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:41,846 WARN [DataStreamer for file /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 block BP-69408873-172.17.0.2-1733349027842:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-69408873-172.17.0.2-1733349027842:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34505,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK], DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34505,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]) is bad. 2024-12-04T21:50:41,847 WARN [PacketResponder: BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34505] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:41,847 WARN [DataStreamer for file /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta block BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK], DatanodeInfoWithStorage[127.0.0.1:34505,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34505,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]) is bad. 2024-12-04T21:50:41,847 WARN [ResponseProcessor for block BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:34505,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:41,848 WARN [DataStreamer for file /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349028921 block BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK], DatanodeInfoWithStorage[127.0.0.1:34505,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34505,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]) is bad. 2024-12-04T21:50:41,848 WARN [PacketResponder: BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34505] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:41,848 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-944470836_22 at /127.0.0.1:34980 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34980 dst: /127.0.0.1:34505 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:50:41,849 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-944470836_22 at /127.0.0.1:47618 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47618 dst: /127.0.0.1:33911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:41,849 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-944470836_22 at /127.0.0.1:47622 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47622 dst: /127.0.0.1:33911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:50:41,850 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1925752952_22 at /127.0.0.1:47600 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47600 dst: /127.0.0.1:33911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:41,850 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1925752952_22 at /127.0.0.1:34942 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34942 dst: /127.0.0.1:34505 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:41,850 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-944470836_22 at /127.0.0.1:34988 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34988 dst: /127.0.0.1:34505 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:50:41,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26c31391{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:41,852 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6150aad0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:50:41,852 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:50:41,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@261c7248{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:50:41,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f4d9d98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,STOPPED} 2024-12-04T21:50:41,853 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:50:41,853 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T21:50:41,853 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-69408873-172.17.0.2-1733349027842 (Datanode Uuid 76d9aac4-f00f-46a2-811b-2c517873e067) service to localhost/127.0.0.1:32793 2024-12-04T21:50:41,853 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:50:41,854 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data4/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:50:41,854 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:50:41,857 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data3/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:50:41,873 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:50:41,876 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:50:41,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:50:41,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:50:41,877 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:50:41,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54a51f1b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:50:41,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@359657e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:50:41,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-12-04T21:50:41,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:41,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f5c5ca8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/java.io.tmpdir/jetty-localhost-34459-hadoop-hdfs-3_4_1-tests_jar-_-any-847252063355140800/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:41,968 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43a63cd4{HTTP/1.1, (http/1.1)}{localhost:34459} 2024-12-04T21:50:41,968 INFO [Time-limited test {}] server.Server(415): Started @162898ms 2024-12-04T21:50:41,969 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T21:50:41,986 WARN [ResponseProcessor for block BP-69408873-172.17.0.2-1733349027842:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-69408873-172.17.0.2-1733349027842:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:41,986 WARN [ResponseProcessor for block BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:41,986 WARN [ResponseProcessor for block BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:41,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-944470836_22 at /127.0.0.1:35484 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35484 dst: /127.0.0.1:33911 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:41,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1925752952_22 at /127.0.0.1:35512 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35512 dst: /127.0.0.1:33911 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:41,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-944470836_22 at /127.0.0.1:35496 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35496 dst: /127.0.0.1:33911 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:50:41,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4930728e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:41,988 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f91f141{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:50:41,988 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:50:41,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65b2da55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:50:41,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e543aab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,STOPPED} 2024-12-04T21:50:41,989 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:50:41,989 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T21:50:41,989 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-69408873-172.17.0.2-1733349027842 (Datanode Uuid 71bb6dfc-d599-4394-a3b5-b55368101870) service to localhost/127.0.0.1:32793 2024-12-04T21:50:41,989 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:50:41,990 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data1/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:50:41,990 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data2/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:50:41,990 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:50:41,998 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:50:42,001 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:50:42,002 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:50:42,002 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:50:42,002 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:50:42,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b6134cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:50:42,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7244bddc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:50:42,030 WARN [Thread-1337 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:50:42,033 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83ed1a4feb2214b4 with lease ID 0xb0b5040f23bbd203: from storage DS-125337f2-c826-494d-a68f-a6da5122d160 node DatanodeRegistration(127.0.0.1:43089, datanodeUuid=76d9aac4-f00f-46a2-811b-2c517873e067, infoPort=34121, infoSecurePort=0, ipcPort=41733, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:42,033 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83ed1a4feb2214b4 with lease ID 0xb0b5040f23bbd203: from storage DS-9e94cea7-f9de-438b-8dfa-254740c8acfe node DatanodeRegistration(127.0.0.1:43089, datanodeUuid=76d9aac4-f00f-46a2-811b-2c517873e067, infoPort=34121, infoSecurePort=0, ipcPort=41733, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:42,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25bf9aaf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/java.io.tmpdir/jetty-localhost-33049-hadoop-hdfs-3_4_1-tests_jar-_-any-50250561521864122/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:42,100 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@791546e7{HTTP/1.1, (http/1.1)}{localhost:33049} 2024-12-04T21:50:42,100 INFO [Time-limited test {}] server.Server(415): Started @163030ms 2024-12-04T21:50:42,101 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T21:50:42,194 WARN [Thread-1368 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:50:42,196 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c9be06244364f87 with lease ID 0xb0b5040f23bbd204: from storage DS-5dd181de-54fa-48ad-b319-1630d6afba99 node DatanodeRegistration(127.0.0.1:46581, datanodeUuid=71bb6dfc-d599-4394-a3b5-b55368101870, infoPort=34965, infoSecurePort=0, ipcPort=41393, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:42,196 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c9be06244364f87 with lease ID 0xb0b5040f23bbd204: from storage DS-4e4444ce-aed3-40c4-8de6-2caa13fe1dde node DatanodeRegistration(127.0.0.1:46581, datanodeUuid=71bb6dfc-d599-4394-a3b5-b55368101870, infoPort=34965, infoSecurePort=0, ipcPort=41393, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:42,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:42,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:42,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:43,121 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-04T21:50:43,127 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-04T21:50:43,128 ERROR [FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62-prefix:bb3046a53f79,46493,1733349028439 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:43,129 WARN [FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62-prefix:bb3046a53f79,46493,1733349028439 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:43,129 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C46493%2C1733349028439:(num 1733349029228) roll requested 2024-12-04T21:50:43,129 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46493%2C1733349028439.1733349043129 2024-12-04T21:50:43,136 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 newFile=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 2024-12-04T21:50:43,137 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:43,137 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:43,137 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:43,137 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:43,137 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:43,137 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 2024-12-04T21:50:43,138 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:43,138 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:43,138 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 2024-12-04T21:50:43,138 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34121:34121),(127.0.0.1/127.0.0.1:34965:34965)] 2024-12-04T21:50:43,138 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 is not closed yet, will try archiving it next time 2024-12-04T21:50:43,138 WARN [IPC Server handler 0 on default port 32793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-12-04T21:50:43,139 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 after 1ms 2024-12-04T21:50:43,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:43,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:43,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:44,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:44,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:44,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:45,145 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-04T21:50:45,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:45,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:45,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:46,033 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-04T21:50:46,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:46,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:46,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:47,140 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 after 4002ms 2024-12-04T21:50:47,151 WARN [ResponseProcessor for block BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:46581,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:47,151 WARN [DataStreamer for file /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 block BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43089,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK], DatanodeInfoWithStorage[127.0.0.1:46581,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46581,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]) is bad. 2024-12-04T21:50:47,151 WARN [PacketResponder: BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46581] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-04T21:50:47,151 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-944470836_22 at /127.0.0.1:60466 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43089:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60466 dst: /127.0.0.1:43089 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:47,152 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-944470836_22 at /127.0.0.1:53788 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:46581:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53788 dst: /127.0.0.1:46581 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:47,153 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25bf9aaf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:47,153 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@791546e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:50:47,153 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:50:47,153 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7244bddc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:50:47,153 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b6134cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,STOPPED} 2024-12-04T21:50:47,154 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:50:47,154 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:50:47,154 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-69408873-172.17.0.2-1733349027842 (Datanode Uuid 71bb6dfc-d599-4394-a3b5-b55368101870) service to localhost/127.0.0.1:32793 2024-12-04T21:50:47,154 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:50:47,155 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data1/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:50:47,155 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data2/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:50:47,155 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:50:47,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:50:47,169 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:50:47,169 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:50:47,169 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:50:47,170 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:50:47,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3527dcb2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:50:47,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16ed96fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:50:47,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@282833ec{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/java.io.tmpdir/jetty-localhost-43581-hadoop-hdfs-3_4_1-tests_jar-_-any-8232737559361928632/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:47,260 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73751e94{HTTP/1.1, (http/1.1)}{localhost:43581} 2024-12-04T21:50:47,260 INFO [Time-limited test {}] server.Server(415): Started @168190ms 2024-12-04T21:50:47,261 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:50:47,281 WARN [ResponseProcessor for block BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:47,282 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-944470836_22 at /127.0.0.1:60474 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43089:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60474 dst: /127.0.0.1:43089 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:47,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f5c5ca8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:47,284 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43a63cd4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:50:47,284 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:50:47,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@359657e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:50:47,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54a51f1b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,STOPPED} 2024-12-04T21:50:47,285 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:50:47,285 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:50:47,285 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:50:47,285 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-69408873-172.17.0.2-1733349027842 (Datanode Uuid 76d9aac4-f00f-46a2-811b-2c517873e067) service to localhost/127.0.0.1:32793 2024-12-04T21:50:47,286 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data3/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:50:47,286 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data4/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:50:47,286 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:50:47,297 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:50:47,300 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:50:47,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:50:47,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:50:47,301 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:50:47,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f1c133c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:50:47,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@203ef201{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:50:47,322 WARN [Thread-1411 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:50:47,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb2ccde092b54f8b6 with lease ID 0xb0b5040f23bbd205: from storage DS-5dd181de-54fa-48ad-b319-1630d6afba99 node DatanodeRegistration(127.0.0.1:39871, datanodeUuid=71bb6dfc-d599-4394-a3b5-b55368101870, infoPort=33755, infoSecurePort=0, ipcPort=45527, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:47,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb2ccde092b54f8b6 with lease ID 0xb0b5040f23bbd205: from storage DS-4e4444ce-aed3-40c4-8de6-2caa13fe1dde node DatanodeRegistration(127.0.0.1:39871, datanodeUuid=71bb6dfc-d599-4394-a3b5-b55368101870, infoPort=33755, infoSecurePort=0, ipcPort=45527, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:47,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:47,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d9af1e8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/java.io.tmpdir/jetty-localhost-38209-hadoop-hdfs-3_4_1-tests_jar-_-any-6309322289243761807/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:50:47,400 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e4c4f04{HTTP/1.1, (http/1.1)}{localhost:38209} 2024-12-04T21:50:47,400 INFO [Time-limited test {}] server.Server(415): Started @168330ms 2024-12-04T21:50:47,402 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:50:47,460 WARN [Thread-1442 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:50:47,462 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd3e31314f43caaa with lease ID 0xb0b5040f23bbd206: from storage DS-125337f2-c826-494d-a68f-a6da5122d160 node DatanodeRegistration(127.0.0.1:37583, datanodeUuid=76d9aac4-f00f-46a2-811b-2c517873e067, infoPort=43485, infoSecurePort=0, ipcPort=46705, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:47,462 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd3e31314f43caaa with lease ID 0xb0b5040f23bbd206: from storage DS-9e94cea7-f9de-438b-8dfa-254740c8acfe node DatanodeRegistration(127.0.0.1:37583, datanodeUuid=76d9aac4-f00f-46a2-811b-2c517873e067, infoPort=43485, infoSecurePort=0, ipcPort=46705, storageInfo=lv=-57;cid=testClusterID;nsid=1759071356;c=1733349027842), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:50:47,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:47,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:48,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:48,418 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-04T21:50:48,423 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-04T21:50:48,427 ERROR [FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62-prefix:bb3046a53f79,46493,1733349028439 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43089,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:48,428 WARN [FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62-prefix:bb3046a53f79,46493,1733349028439 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43089,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:48,428 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C46493%2C1733349028439:(num 1733349043129) roll requested 2024-12-04T21:50:48,429 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46493%2C1733349028439.1733349048429 2024-12-04T21:50:48,436 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 newFile=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349048429 2024-12-04T21:50:48,436 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:48,436 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:48,436 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:48,436 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:48,436 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:48,437 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349048429 2024-12-04T21:50:48,437 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43089,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:48,437 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43089,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:48,437 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 2024-12-04T21:50:48,438 WARN [IPC Server handler 1 on default port 32793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-04T21:50:48,438 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33755:33755),(127.0.0.1/127.0.0.1:43485:43485)] 2024-12-04T21:50:48,438 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 is not closed yet, will try archiving it next time 2024-12-04T21:50:48,438 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 after 1ms 2024-12-04T21:50:48,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:48,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:49,326 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-04T21:50:49,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:49,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:49,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:50,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-04T21:50:50,440 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46493%2C1733349028439.1733349050439
2024-12-04T21:50:50,449 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349048429 newFile=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439
2024-12-04T21:50:50,449 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:50,449 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:50,450 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:50,450 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:50,450 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:50:50,450 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349048429 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439
2024-12-04T21:50:50,452 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43485:43485),(127.0.0.1/127.0.0.1:33755:33755)]
2024-12-04T21:50:50,452 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 is not closed yet, will try archiving it next time
2024-12-04T21:50:50,452 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349048429 is not closed yet, will try archiving it next time
2024-12-04T21:50:50,453 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228
2024-12-04T21:50:50,453 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228
2024-12-04T21:50:50,454 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228 after 1ms
2024-12-04T21:50:50,455 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228
2024-12-04T21:50:50,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741838_1019 (size=1264)
2024-12-04T21:50:50,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741838_1019 (size=1264)
2024-12-04T21:50:50,456 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 is not closed yet, will try archiving it next time
2024-12-04T21:50:50,465 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733349030156/Put/vlen=218/seqid=0]
2024-12-04T21:50:50,465 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733349039841/Put/vlen=1045/seqid=0]
2024-12-04T21:50:50,465 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349029228
2024-12-04T21:50:50,465 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129
2024-12-04T21:50:50,465 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129
2024-12-04T21:50:50,466 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 after 1ms
2024-12-04T21:50:50,466 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129
2024-12-04T21:50:50,469 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733349043128/Put/vlen=1045/seqid=0]
2024-12-04T21:50:50,469 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733349045148/Put/vlen=1045/seqid=0]
2024-12-04T21:50:50,469 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129
2024-12-04T21:50:50,469 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349048429
2024-12-04T21:50:50,469 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349048429
2024-12-04T21:50:50,469 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349048429 after 0ms
2024-12-04T21:50:50,469 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349048429
2024-12-04T21:50:50,472 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733349048426/Put/vlen=1045/seqid=0]
2024-12-04T21:50:50,472 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439
2024-12-04T21:50:50,472 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439
2024-12-04T21:50:50,472 WARN [IPC Server handler 3 on default port 32793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021
2024-12-04T21:50:50,473 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 after 1ms
2024-12-04T21:50:50,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:50,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:51,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:51,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1925752952_22 at /127.0.0.1:35634 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37583:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35634 dst: /127.0.0.1:37583 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:37583 remote=/127.0.0.1:35634]. Total timeout mills is 60000, 58982 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:51,467 WARN [ResponseProcessor for block BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:51,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1925752952_22 at /127.0.0.1:35758 [Receiving block BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35758 dst: /127.0.0.1:39871 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:50:51,468 WARN [DataStreamer for file /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 block BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37583,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK], DatanodeInfoWithStorage[127.0.0.1:39871,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37583,DS-125337f2-c826-494d-a68f-a6da5122d160,DISK]) is bad. 2024-12-04T21:50:51,473 WARN [DataStreamer for file /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 block BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:51,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741839_1022 (size=85) 2024-12-04T21:50:51,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741839_1022 (size=85) 2024-12-04T21:50:51,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:50:51,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:52,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:52,440 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349043129 after 4003ms 2024-12-04T21:50:52,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:52,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:53,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:53,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:53,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:54,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-04T21:50:54,474 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 after 4002ms
2024-12-04T21:50:54,475 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439
2024-12-04T21:50:54,482 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439
2024-12-04T21:50:54,482 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 85aba77a8e5747b7cf1d2c755905cb27 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB
2024-12-04T21:50:54,483 ERROR [FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62-prefix:bb3046a53f79,46493,1733349028439 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:54,483 WARN [FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62-prefix:bb3046a53f79,46493,1733349028439 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:54,484 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C46493%2C1733349028439:(num 1733349050439) roll requested 2024-12-04T21:50:54,484 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46493%2C1733349028439.1733349054484 2024-12-04T21:50:54,493 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 newFile=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349054484 2024-12-04T21:50:54,493 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,493 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,494 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,494 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,494 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,494 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349054484 2024-12-04T21:50:54,494 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:54,495 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43485:43485),(127.0.0.1/127.0.0.1:33755:33755)] 2024-12-04T21:50:54,495 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 is not closed yet, will try archiving it next time 2024-12-04T21:50:54,495 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-69408873-172.17.0.2-1733349027842:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:54,495 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 2024-12-04T21:50:54,496 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 after 1ms 2024-12-04T21:50:54,496 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.1733349050439 to hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/oldWALs/bb3046a53f79%2C46493%2C1733349028439.1733349050439 2024-12-04T21:50:54,513 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/default/TestLogRolling-testLogRollOnPipelineRestart/85aba77a8e5747b7cf1d2c755905cb27/.tmp/info/aa1aabbb94cc44a8af67e8e9a12d12fb is 1080, key is row1002/info:/1733349039841/Put/seqid=0 2024-12-04T21:50:54,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741841_1024 (size=9270) 2024-12-04T21:50:54,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741841_1024 (size=9270) 2024-12-04T21:50:54,519 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/default/TestLogRolling-testLogRollOnPipelineRestart/85aba77a8e5747b7cf1d2c755905cb27/.tmp/info/aa1aabbb94cc44a8af67e8e9a12d12fb 2024-12-04T21:50:54,524 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/default/TestLogRolling-testLogRollOnPipelineRestart/85aba77a8e5747b7cf1d2c755905cb27/.tmp/info/aa1aabbb94cc44a8af67e8e9a12d12fb as hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/default/TestLogRolling-testLogRollOnPipelineRestart/85aba77a8e5747b7cf1d2c755905cb27/info/aa1aabbb94cc44a8af67e8e9a12d12fb 2024-12-04T21:50:54,531 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/default/TestLogRolling-testLogRollOnPipelineRestart/85aba77a8e5747b7cf1d2c755905cb27/info/aa1aabbb94cc44a8af67e8e9a12d12fb, entries=4, sequenceid=8, filesize=9.1 K 2024-12-04T21:50:54,532 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 85aba77a8e5747b7cf1d2c755905cb27 in 50ms, sequenceid=8, compaction requested=false 2024-12-04T21:50:54,532 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 85aba77a8e5747b7cf1d2c755905cb27: 2024-12-04T21:50:54,532 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-04T21:50:54,533 ERROR 
[FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62-prefix:bb3046a53f79,46493,1733349028439.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:54,533 WARN [FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62-prefix:bb3046a53f79,46493,1733349028439.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:54,533 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C46493%2C1733349028439.meta:.meta(num 1733349029602) roll requested 2024-12-04T21:50:54,533 INFO [regionserver/bb3046a53f79:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46493%2C1733349028439.meta.1733349054533.meta 2024-12-04T21:50:54,538 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,538 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,539 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,539 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,539 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:54,539 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349054533.meta 2024-12-04T21:50:54,539 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:54,539 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:54,539 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta 2024-12-04T21:50:54,540 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33755:33755),(127.0.0.1/127.0.0.1:43485:43485)] 2024-12-04T21:50:54,540 DEBUG [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta is not closed yet, will try archiving it next time 2024-12-04T21:50:54,540 WARN [IPC Server handler 2 on default port 32793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1014 2024-12-04T21:50:54,540 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta after 1ms 2024-12-04T21:50:54,552 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/.tmp/info/96a0e5de48224e09aa293a2d3b3e6afc is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27./info:regioninfo/1733349030160/Put/seqid=0 2024-12-04T21:50:54,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741843_1027 (size=7125) 2024-12-04T21:50:54,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741843_1027 (size=7125) 2024-12-04T21:50:54,557 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/.tmp/info/96a0e5de48224e09aa293a2d3b3e6afc 2024-12-04T21:50:54,575 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/.tmp/ns/5cc71fac71234779bb7a95f2bcc01c8c is 43, key is default/ns:d/1733349029650/Put/seqid=0 2024-12-04T21:50:54,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741844_1028 (size=5153) 2024-12-04T21:50:54,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741844_1028 (size=5153) 2024-12-04T21:50:54,580 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/.tmp/ns/5cc71fac71234779bb7a95f2bcc01c8c 2024-12-04T21:50:54,598 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/.tmp/table/942052d2e3354df68fb5593a6c5ca092 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733349030171/Put/seqid=0 2024-12-04T21:50:54,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741845_1029 (size=5438) 2024-12-04T21:50:54,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741845_1029 (size=5438) 2024-12-04T21:50:54,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:54,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:55,005 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/.tmp/table/942052d2e3354df68fb5593a6c5ca092 2024-12-04T21:50:55,018 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/.tmp/info/96a0e5de48224e09aa293a2d3b3e6afc as hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/info/96a0e5de48224e09aa293a2d3b3e6afc 2024-12-04T21:50:55,027 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/info/96a0e5de48224e09aa293a2d3b3e6afc, entries=10, sequenceid=11, filesize=7.0 K 2024-12-04T21:50:55,028 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/.tmp/ns/5cc71fac71234779bb7a95f2bcc01c8c as hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/ns/5cc71fac71234779bb7a95f2bcc01c8c 2024-12-04T21:50:55,034 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/ns/5cc71fac71234779bb7a95f2bcc01c8c, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T21:50:55,035 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/.tmp/table/942052d2e3354df68fb5593a6c5ca092 as hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/table/942052d2e3354df68fb5593a6c5ca092 2024-12-04T21:50:55,041 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/table/942052d2e3354df68fb5593a6c5ca092, entries=2, sequenceid=11, filesize=5.3 K 2024-12-04T21:50:55,042 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 510ms, sequenceid=11, compaction requested=false 2024-12-04T21:50:55,042 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-04T21:50:55,048 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T21:50:55,048 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T21:50:55,048 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:50:55,049 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:55,049 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:55,049 INFO 
[Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-04T21:50:55,049 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T21:50:55,049 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2099266181, stopped=false 2024-12-04T21:50:55,049 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bb3046a53f79,43273,1733349028397 2024-12-04T21:50:55,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:50:55,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:50:55,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:55,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:55,050 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:50:55,051 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T21:50:55,051 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:50:55,051 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:55,051 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bb3046a53f79,46493,1733349028439' ***** 2024-12-04T21:50:55,051 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T21:50:55,051 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T21:50:55,051 INFO [RS:0;bb3046a53f79:46493 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T21:50:55,051 INFO [RS:0;bb3046a53f79:46493 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T21:50:55,051 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:50:55,051 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T21:50:55,052 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(3091): Received CLOSE for 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:55,052 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:50:55,052 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(959): stopping server bb3046a53f79,46493,1733349028439 2024-12-04T21:50:55,052 INFO [RS:0;bb3046a53f79:46493 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:50:55,052 INFO [RS:0;bb3046a53f79:46493 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bb3046a53f79:46493. 
2024-12-04T21:50:55,052 DEBUG [RS:0;bb3046a53f79:46493 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:50:55,052 DEBUG [RS:0;bb3046a53f79:46493 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:55,052 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 85aba77a8e5747b7cf1d2c755905cb27, disabling compactions & flushes 2024-12-04T21:50:55,052 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T21:50:55,052 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:55,052 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T21:50:55,052 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T21:50:55,052 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:55,052 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T21:50:55,052 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. after waiting 0 ms 2024-12-04T21:50:55,052 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 
2024-12-04T21:50:55,053 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-04T21:50:55,053 DEBUG [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(1325): Online Regions={85aba77a8e5747b7cf1d2c755905cb27=TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27., 1588230740=hbase:meta,,1.1588230740} 2024-12-04T21:50:55,053 DEBUG [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 85aba77a8e5747b7cf1d2c755905cb27 2024-12-04T21:50:55,053 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:50:55,053 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:50:55,053 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:50:55,053 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:50:55,053 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:50:55,064 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/default/TestLogRolling-testLogRollOnPipelineRestart/85aba77a8e5747b7cf1d2c755905cb27/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-04T21:50:55,065 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 2024-12-04T21:50:55,065 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 85aba77a8e5747b7cf1d2c755905cb27: Waiting for close lock at 1733349055052Running coprocessor pre-close hooks at 1733349055052Disabling compacts and flushes for region at 1733349055052Disabling writes for close at 1733349055052Writing region close event to WAL at 1733349055053 (+1 ms)Running coprocessor post-close hooks at 1733349055065 (+12 ms)Closed at 1733349055065 2024-12-04T21:50:55,065 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733349029785.85aba77a8e5747b7cf1d2c755905cb27. 
2024-12-04T21:50:55,079 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-04T21:50:55,080 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:50:55,080 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:50:55,080 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733349055053Running coprocessor pre-close hooks at 1733349055053Disabling compacts and flushes for region at 1733349055053Disabling writes for close at 1733349055053Writing region close event to WAL at 1733349055065 (+12 ms)Running coprocessor post-close hooks at 1733349055080 (+15 ms)Closed at 1733349055080 2024-12-04T21:50:55,080 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T21:50:55,096 INFO [regionserver/bb3046a53f79:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-04T21:50:55,096 INFO [regionserver/bb3046a53f79:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-04T21:50:55,098 INFO [regionserver/bb3046a53f79:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:50:55,253 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(976): stopping server bb3046a53f79,46493,1733349028439; all regions closed. 2024-12-04T21:50:55,253 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:55,254 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:55,254 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:55,254 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:55,255 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:55,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741842_1025 (size=825) 2024-12-04T21:50:55,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741842_1025 (size=825) 2024-12-04T21:50:55,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:55,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:55,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:56,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:56,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:56,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:57,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:57,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:57,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:58,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:58,378 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T21:50:58,464 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-04T21:50:58,542 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta after 4002ms 2024-12-04T21:50:58,543 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/WALs/bb3046a53f79,46493,1733349028439/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta to hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/oldWALs/bb3046a53f79%2C46493%2C1733349028439.meta.1733349029602.meta 2024-12-04T21:50:58,550 DEBUG [RS:0;bb3046a53f79:46493 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/oldWALs 2024-12-04T21:50:58,550 INFO [RS:0;bb3046a53f79:46493 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C46493%2C1733349028439.meta:.meta(num 1733349054533) 2024-12-04T21:50:58,551 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,551 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,551 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,552 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,552 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741840_1023 (size=1162) 2024-12-04T21:50:58,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741840_1023 (size=1162) 2024-12-04T21:50:58,563 DEBUG [RS:0;bb3046a53f79:46493 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/oldWALs 2024-12-04T21:50:58,563 INFO [RS:0;bb3046a53f79:46493 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C46493%2C1733349028439:(num 1733349054484) 2024-12-04T21:50:58,563 DEBUG [RS:0;bb3046a53f79:46493 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:50:58,563 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:50:58,563 INFO [RS:0;bb3046a53f79:46493 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:50:58,563 INFO [RS:0;bb3046a53f79:46493 {}] hbase.ChoreService(370): Chore service for: regionserver/bb3046a53f79:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T21:50:58,564 INFO [RS:0;bb3046a53f79:46493 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:50:58,564 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-04T21:50:58,564 INFO [RS:0;bb3046a53f79:46493 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46493 2024-12-04T21:50:58,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bb3046a53f79,46493,1733349028439 2024-12-04T21:50:58,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:50:58,566 INFO [RS:0;bb3046a53f79:46493 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:50:58,567 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bb3046a53f79,46493,1733349028439] 2024-12-04T21:50:58,567 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bb3046a53f79,46493,1733349028439 already deleted, retry=false 2024-12-04T21:50:58,567 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bb3046a53f79,46493,1733349028439 expired; onlineServers=0 2024-12-04T21:50:58,568 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bb3046a53f79,43273,1733349028397' ***** 2024-12-04T21:50:58,568 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T21:50:58,568 INFO [M:0;bb3046a53f79:43273 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:50:58,568 INFO [M:0;bb3046a53f79:43273 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:50:58,568 DEBUG [M:0;bb3046a53f79:43273 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T21:50:58,568 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T21:50:58,568 DEBUG [M:0;bb3046a53f79:43273 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T21:50:58,568 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349028983 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349028983,5,FailOnTimeoutGroup] 2024-12-04T21:50:58,568 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349028985 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349028985,5,FailOnTimeoutGroup] 2024-12-04T21:50:58,568 INFO [M:0;bb3046a53f79:43273 {}] hbase.ChoreService(370): Chore service for: master/bb3046a53f79:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T21:50:58,568 INFO [M:0;bb3046a53f79:43273 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:50:58,568 DEBUG [M:0;bb3046a53f79:43273 {}] master.HMaster(1795): Stopping service threads 2024-12-04T21:50:58,568 INFO [M:0;bb3046a53f79:43273 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T21:50:58,569 INFO [M:0;bb3046a53f79:43273 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:50:58,569 INFO [M:0;bb3046a53f79:43273 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T21:50:58,569 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T21:50:58,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T21:50:58,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:50:58,569 DEBUG [M:0;bb3046a53f79:43273 {}] zookeeper.ZKUtil(347): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T21:50:58,569 WARN [M:0;bb3046a53f79:43273 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T21:50:58,570 INFO [M:0;bb3046a53f79:43273 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/.lastflushedseqids 2024-12-04T21:50:58,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741846_1030 (size=130) 2024-12-04T21:50:58,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741846_1030 (size=130) 2024-12-04T21:50:58,575 INFO [M:0;bb3046a53f79:43273 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T21:50:58,576 INFO [M:0;bb3046a53f79:43273 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T21:50:58,576 DEBUG [M:0;bb3046a53f79:43273 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:50:58,576 INFO [M:0;bb3046a53f79:43273 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:58,576 DEBUG [M:0;bb3046a53f79:43273 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:58,576 DEBUG [M:0;bb3046a53f79:43273 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:50:58,576 DEBUG [M:0;bb3046a53f79:43273 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:58,576 INFO [M:0;bb3046a53f79:43273 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-12-04T21:50:58,576 ERROR [FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData-prefix:bb3046a53f79,43273,1733349028397 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:58,576 WARN [FSHLog-0-hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData-prefix:bb3046a53f79,43273,1733349028397 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:58,576 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog bb3046a53f79%2C43273%2C1733349028397:(num 1733349028921) roll requested 2024-12-04T21:50:58,577 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C43273%2C1733349028397.1733349058577 2024-12-04T21:50:58,581 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,581 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,581 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,581 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,582 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,582 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349028921 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349058577 2024-12-04T21:50:58,582 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-04T21:50:58,582 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33911,DS-5dd181de-54fa-48ad-b319-1630d6afba99,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-04T21:50:58,582 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349028921 2024-12-04T21:50:58,582 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33755:33755),(127.0.0.1/127.0.0.1:43485:43485)] 2024-12-04T21:50:58,582 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349028921 is not closed yet, will try archiving it next time 2024-12-04T21:50:58,583 WARN [IPC Server handler 3 on default port 32793 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349028921 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-04T21:50:58,583 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349028921 after 1ms 2024-12-04T21:50:58,596 DEBUG [M:0;bb3046a53f79:43273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f4911938db284d9abf0c0fb34548e7ab is 82, key is hbase:meta,,1/info:regioninfo/1733349029633/Put/seqid=0 2024-12-04T21:50:58,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741848_1033 (size=5672) 2024-12-04T21:50:58,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741848_1033 (size=5672) 2024-12-04T21:50:58,601 INFO [M:0;bb3046a53f79:43273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f4911938db284d9abf0c0fb34548e7ab 2024-12-04T21:50:58,619 DEBUG [M:0;bb3046a53f79:43273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4a4219dcca644960bce5c275d747e331 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733349030175/Put/seqid=0 2024-12-04T21:50:58,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741849_1034 (size=6119) 2024-12-04T21:50:58,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741849_1034 (size=6119) 2024-12-04T21:50:58,624 INFO [M:0;bb3046a53f79:43273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4a4219dcca644960bce5c275d747e331 2024-12-04T21:50:58,641 DEBUG [M:0;bb3046a53f79:43273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3f31d0156a448ea8df2d9224904456e is 69, key is bb3046a53f79,46493,1733349028439/rs:state/1733349029082/Put/seqid=0 2024-12-04T21:50:58,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741850_1035 (size=5156) 2024-12-04T21:50:58,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741850_1035 (size=5156) 2024-12-04T21:50:58,646 INFO [M:0;bb3046a53f79:43273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3f31d0156a448ea8df2d9224904456e 2024-12-04T21:50:58,663 DEBUG [M:0;bb3046a53f79:43273 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/571b5a263cf5425783d6bbf7f3f36f2a is 52, key is load_balancer_on/state:d/1733349029779/Put/seqid=0 2024-12-04T21:50:58,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:50:58,667 INFO [RS:0;bb3046a53f79:46493 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:50:58,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46493-0x100a7368e8c0001, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:50:58,667 INFO [RS:0;bb3046a53f79:46493 {}] regionserver.HRegionServer(1031): Exiting; stopping=bb3046a53f79,46493,1733349028439; zookeeper connection closed. 
2024-12-04T21:50:58,667 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68e38fa5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68e38fa5 2024-12-04T21:50:58,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741851_1036 (size=5056) 2024-12-04T21:50:58,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741851_1036 (size=5056) 2024-12-04T21:50:58,667 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T21:50:58,668 INFO [M:0;bb3046a53f79:43273 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/571b5a263cf5425783d6bbf7f3f36f2a 2024-12-04T21:50:58,673 DEBUG [M:0;bb3046a53f79:43273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f4911938db284d9abf0c0fb34548e7ab as hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f4911938db284d9abf0c0fb34548e7ab 2024-12-04T21:50:58,678 INFO [M:0;bb3046a53f79:43273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f4911938db284d9abf0c0fb34548e7ab, entries=8, sequenceid=56, filesize=5.5 K 2024-12-04T21:50:58,679 DEBUG [M:0;bb3046a53f79:43273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4a4219dcca644960bce5c275d747e331 as hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4a4219dcca644960bce5c275d747e331 2024-12-04T21:50:58,683 INFO [M:0;bb3046a53f79:43273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4a4219dcca644960bce5c275d747e331, entries=6, sequenceid=56, filesize=6.0 K 2024-12-04T21:50:58,684 DEBUG [M:0;bb3046a53f79:43273 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3f31d0156a448ea8df2d9224904456e as hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3f31d0156a448ea8df2d9224904456e 2024-12-04T21:50:58,689 INFO [M:0;bb3046a53f79:43273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3f31d0156a448ea8df2d9224904456e, entries=1, sequenceid=56, filesize=5.0 K 2024-12-04T21:50:58,690 DEBUG [M:0;bb3046a53f79:43273 
{}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/571b5a263cf5425783d6bbf7f3f36f2a as hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/571b5a263cf5425783d6bbf7f3f36f2a 2024-12-04T21:50:58,694 INFO [M:0;bb3046a53f79:43273 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/571b5a263cf5425783d6bbf7f3f36f2a, entries=1, sequenceid=56, filesize=4.9 K 2024-12-04T21:50:58,695 INFO [M:0;bb3046a53f79:43273 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=56, compaction requested=false 2024-12-04T21:50:58,696 INFO [M:0;bb3046a53f79:43273 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:50:58,696 DEBUG [M:0;bb3046a53f79:43273 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733349058576Disabling compacts and flushes for region at 1733349058576Disabling writes for close at 1733349058576Obtaining lock to block concurrent updates at 1733349058576Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733349058576Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733349058576Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733349058583 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733349058583Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733349058596 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733349058596Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733349058606 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733349058619 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733349058619Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733349058628 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733349058641 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733349058641Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733349058650 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733349058663 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733349058663Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c34427: reopening flushed file at 1733349058672 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55e5d6cc: reopening flushed file at 1733349058678 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67ec9d34: reopening flushed file at 1733349058684 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40d64706: reopening flushed file at 1733349058689 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=56, compaction requested=false at 1733349058695 (+6 ms)Writing region close event to WAL at 1733349058696 (+1 ms)Closed at 1733349058696 2024-12-04T21:50:58,697 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,697 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,697 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,697 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,697 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:50:58,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37583 is added to blk_1073741847_1031 (size=757) 2024-12-04T21:50:58,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741847_1031 (size=757) 2024-12-04T21:50:58,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:58,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:59,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:59,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:50:59,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:00,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,092 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,092 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:00,595 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T21:51:00,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:00,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:00,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:01,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:01,464 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-04T21:51:01,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:01,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:02,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:02,584 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349028921 after 4002ms 2024-12-04T21:51:02,586 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/WALs/bb3046a53f79,43273,1733349028397/bb3046a53f79%2C43273%2C1733349028397.1733349028921 to hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/oldWALs/bb3046a53f79%2C43273%2C1733349028397.1733349028921 2024-12-04T21:51:02,594 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/MasterData/oldWALs/bb3046a53f79%2C43273%2C1733349028397.1733349028921 to hdfs://localhost:32793/user/jenkins/test-data/5ecde223-aad6-6640-9ca6-4542f7268a62/oldWALs/bb3046a53f79%2C43273%2C1733349028397.1733349028921$masterlocalwal$ 2024-12-04T21:51:02,595 INFO [M:0;bb3046a53f79:43273 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T21:51:02,595 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T21:51:02,595 INFO [M:0;bb3046a53f79:43273 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43273 2024-12-04T21:51:02,595 INFO [M:0;bb3046a53f79:43273 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:51:02,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:51:02,698 INFO [M:0;bb3046a53f79:43273 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:51:02,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43273-0x100a7368e8c0000, quorum=127.0.0.1:62257, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:51:02,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d9af1e8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:51:02,705 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e4c4f04{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:51:02,705 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:51:02,706 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@203ef201{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:51:02,706 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f1c133c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,STOPPED} 2024-12-04T21:51:02,708 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
2024-12-04T21:51:02,708 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T21:51:02,708 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-69408873-172.17.0.2-1733349027842 (Datanode Uuid 76d9aac4-f00f-46a2-811b-2c517873e067) service to localhost/127.0.0.1:32793 2024-12-04T21:51:02,708 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:51:02,709 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data3/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:51:02,709 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data4/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:51:02,709 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:51:02,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@282833ec{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:51:02,711 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73751e94{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:51:02,712 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:51:02,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16ed96fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:51:02,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3527dcb2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,STOPPED} 2024-12-04T21:51:02,713 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:51:02,713 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:51:02,713 WARN [BP-69408873-172.17.0.2-1733349027842 heartbeating to localhost/127.0.0.1:32793 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-69408873-172.17.0.2-1733349027842 (Datanode Uuid 71bb6dfc-d599-4394-a3b5-b55368101870) service to localhost/127.0.0.1:32793 2024-12-04T21:51:02,713 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:51:02,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data1/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:51:02,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/cluster_ce13027e-6718-9657-7dc2-9d6b2d37c8ff/data/data2/current/BP-69408873-172.17.0.2-1733349027842 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:51:02,714 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:51:02,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16b67381{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:51:02,721 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6274e59b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:51:02,721 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:51:02,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5771e35b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:51:02,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@521c98fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir/,STOPPED} 2024-12-04T21:51:02,727 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T21:51:02,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T21:51:02,754 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 156) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:32793 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32793 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:32793 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32793 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:32793 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:32793 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:32793 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32793 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 434) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=99 (was 121), ProcessCount=11 (was 11), AvailableMemoryMB=2279 (was 2529) 2024-12-04T21:51:02,760 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=99, ProcessCount=11, AvailableMemoryMB=2278 2024-12-04T21:51:02,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T21:51:02,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.log.dir so I do NOT create it in target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d 2024-12-04T21:51:02,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bf5462c2-d6e9-b022-d23e-ff0463d089ef/hadoop.tmp.dir so I do NOT create it in target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d 2024-12-04T21:51:02,760 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164, deleteOnExit=true 2024-12-04T21:51:02,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T21:51:02,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/test.cache.data in system properties and HBase conf 2024-12-04T21:51:02,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T21:51:02,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.log.dir in system properties and HBase conf 2024-12-04T21:51:02,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T21:51:02,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T21:51:02,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T21:51:02,761 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-04T21:51:02,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/nfs.dump.dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/java.io.tmpdir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T21:51:02,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T21:51:02,773 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:51:02,821 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:51:02,824 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:51:02,825 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:51:02,825 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:51:02,825 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:51:02,826 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:51:02,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b7eaed2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:51:02,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28ceda60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:51:02,916 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3db8de80{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/java.io.tmpdir/jetty-localhost-44671-hadoop-hdfs-3_4_1-tests_jar-_-any-5290871492647131873/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:51:02,916 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f36b22{HTTP/1.1, (http/1.1)}{localhost:44671} 2024-12-04T21:51:02,916 INFO [Time-limited test {}] server.Server(415): Started @183846ms 2024-12-04T21:51:02,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:02,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:02,926 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:51:02,963 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:51:02,967 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:51:02,968 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:51:02,968 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:51:02,968 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:51:02,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1cb73ec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:51:02,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7cb8a90e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:51:03,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@28e075ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/java.io.tmpdir/jetty-localhost-45989-hadoop-hdfs-3_4_1-tests_jar-_-any-3531541898320647105/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:51:03,061 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f0c0d1b{HTTP/1.1, (http/1.1)}{localhost:45989} 2024-12-04T21:51:03,061 INFO [Time-limited test {}] server.Server(415): Started @183991ms 2024-12-04T21:51:03,062 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:51:03,085 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:51:03,088 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:51:03,088 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:51:03,088 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:51:03,088 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:51:03,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76a9ae4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:51:03,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68282ab6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:51:03,113 WARN [Thread-1636 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/data/data1/current/BP-589344384-172.17.0.2-1733349062783/current, will proceed with Du for space computation calculation, 2024-12-04T21:51:03,113 WARN [Thread-1637 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/data/data2/current/BP-589344384-172.17.0.2-1733349062783/current, will proceed with Du for space computation calculation, 2024-12-04T21:51:03,132 WARN [Thread-1615 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:51:03,134 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c69a63a2c1c3a80 with lease ID 0x22d488c7a3270d50: Processing first storage report for DS-2d070173-386c-4732-9783-13afd8b26641 from datanode DatanodeRegistration(127.0.0.1:46585, datanodeUuid=e8fc9aff-6b1a-44e9-bdf1-233e32b122b8, infoPort=37757, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=1897402783;c=1733349062783) 2024-12-04T21:51:03,134 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c69a63a2c1c3a80 with lease ID 0x22d488c7a3270d50: from storage DS-2d070173-386c-4732-9783-13afd8b26641 node DatanodeRegistration(127.0.0.1:46585, datanodeUuid=e8fc9aff-6b1a-44e9-bdf1-233e32b122b8, infoPort=37757, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=1897402783;c=1733349062783), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:51:03,134 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c69a63a2c1c3a80 with lease ID 0x22d488c7a3270d50: Processing first storage report for DS-e0f5a88c-6ea9-41d8-aa99-06d0bff87c9f from datanode DatanodeRegistration(127.0.0.1:46585, datanodeUuid=e8fc9aff-6b1a-44e9-bdf1-233e32b122b8, infoPort=37757, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=1897402783;c=1733349062783) 2024-12-04T21:51:03,134 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c69a63a2c1c3a80 with lease ID 0x22d488c7a3270d50: from storage DS-e0f5a88c-6ea9-41d8-aa99-06d0bff87c9f node DatanodeRegistration(127.0.0.1:46585, datanodeUuid=e8fc9aff-6b1a-44e9-bdf1-233e32b122b8, infoPort=37757, infoSecurePort=0, ipcPort=37703, storageInfo=lv=-57;cid=testClusterID;nsid=1897402783;c=1733349062783), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:51:03,187 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@594a0329{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/java.io.tmpdir/jetty-localhost-36023-hadoop-hdfs-3_4_1-tests_jar-_-any-15116393441997598334/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:51:03,187 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fc8e768{HTTP/1.1, (http/1.1)}{localhost:36023} 2024-12-04T21:51:03,187 INFO [Time-limited test {}] server.Server(415): Started @184117ms 2024-12-04T21:51:03,188 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T21:51:03,240 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/data/data4/current/BP-589344384-172.17.0.2-1733349062783/current, will proceed with Du for space computation calculation, 2024-12-04T21:51:03,240 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/data/data3/current/BP-589344384-172.17.0.2-1733349062783/current, will proceed with Du for space computation calculation, 2024-12-04T21:51:03,253 WARN [Thread-1651 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:51:03,255 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d7ae4cfbdd41d05 with lease ID 0x22d488c7a3270d51: Processing first storage report for DS-2f6117d1-a869-4641-94b1-bee1a110c728 from datanode DatanodeRegistration(127.0.0.1:40085, datanodeUuid=ddae31d1-22d6-4563-8ca0-e1244be38825, infoPort=34071, infoSecurePort=0, ipcPort=34069, storageInfo=lv=-57;cid=testClusterID;nsid=1897402783;c=1733349062783) 2024-12-04T21:51:03,255 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d7ae4cfbdd41d05 with lease ID 0x22d488c7a3270d51: from storage DS-2f6117d1-a869-4641-94b1-bee1a110c728 node DatanodeRegistration(127.0.0.1:40085, datanodeUuid=ddae31d1-22d6-4563-8ca0-e1244be38825, infoPort=34071, infoSecurePort=0, ipcPort=34069, storageInfo=lv=-57;cid=testClusterID;nsid=1897402783;c=1733349062783), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:51:03,256 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d7ae4cfbdd41d05 with lease ID 0x22d488c7a3270d51: Processing first storage report for DS-f2584936-49b1-435e-929f-0bdc46da3914 from datanode DatanodeRegistration(127.0.0.1:40085, datanodeUuid=ddae31d1-22d6-4563-8ca0-e1244be38825, infoPort=34071, infoSecurePort=0, ipcPort=34069, storageInfo=lv=-57;cid=testClusterID;nsid=1897402783;c=1733349062783) 2024-12-04T21:51:03,256 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d7ae4cfbdd41d05 with lease ID 0x22d488c7a3270d51: from storage DS-f2584936-49b1-435e-929f-0bdc46da3914 node DatanodeRegistration(127.0.0.1:40085, datanodeUuid=ddae31d1-22d6-4563-8ca0-e1244be38825, infoPort=34071, infoSecurePort=0, ipcPort=34069, storageInfo=lv=-57;cid=testClusterID;nsid=1897402783;c=1733349062783), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:51:03,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d 2024-12-04T21:51:03,313 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/zookeeper_0, clientPort=54713, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T21:51:03,315 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54713 2024-12-04T21:51:03,315 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:03,317 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:03,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:51:03,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:51:03,326 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd with version=8 2024-12-04T21:51:03,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/hbase-staging 2024-12-04T21:51:03,328 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:51:03,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:03,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:03,328 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:51:03,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:03,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:51:03,328 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T21:51:03,328 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:51:03,329 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33977 2024-12-04T21:51:03,330 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33977 connecting to ZooKeeper ensemble=127.0.0.1:54713 2024-12-04T21:51:03,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:339770x0, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:51:03,334 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33977-0x100a73716ff0000 connected 2024-12-04T21:51:03,344 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:03,346 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:03,348 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:51:03,349 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd, hbase.cluster.distributed=false 2024-12-04T21:51:03,351 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:51:03,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33977 2024-12-04T21:51:03,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33977 2024-12-04T21:51:03,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33977 2024-12-04T21:51:03,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:03,353 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33977 2024-12-04T21:51:03,353 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33977 2024-12-04T21:51:03,370 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:51:03,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:03,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:03,370 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:51:03,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:03,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:51:03,370 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T21:51:03,370 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:51:03,371 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33309 2024-12-04T21:51:03,372 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33309 connecting to ZooKeeper ensemble=127.0.0.1:54713 2024-12-04T21:51:03,372 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:03,373 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:03,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333090x0, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:51:03,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:333090x0, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:51:03,377 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33309-0x100a73716ff0001 connected 2024-12-04T21:51:03,377 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T21:51:03,378 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T21:51:03,378 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T21:51:03,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:51:03,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33309 2024-12-04T21:51:03,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33309 2024-12-04T21:51:03,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33309 2024-12-04T21:51:03,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33309 2024-12-04T21:51:03,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33309 2024-12-04T21:51:03,390 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bb3046a53f79:33977 2024-12-04T21:51:03,391 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bb3046a53f79,33977,1733349063328 2024-12-04T21:51:03,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:51:03,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:51:03,392 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bb3046a53f79,33977,1733349063328 2024-12-04T21:51:03,393 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T21:51:03,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,393 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T21:51:03,394 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bb3046a53f79,33977,1733349063328 from backup master directory 2024-12-04T21:51:03,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bb3046a53f79,33977,1733349063328 2024-12-04T21:51:03,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:51:03,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:51:03,394 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T21:51:03,394 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bb3046a53f79,33977,1733349063328 2024-12-04T21:51:03,398 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/hbase.id] with ID: cbd56800-b9e9-4e7d-9756-53b27a9fb7d0 2024-12-04T21:51:03,398 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/.tmp/hbase.id 2024-12-04T21:51:03,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:51:03,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:51:03,403 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/.tmp/hbase.id]:[hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/hbase.id] 2024-12-04T21:51:03,415 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:03,415 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T21:51:03,416 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-04T21:51:03,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:51:03,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:51:03,424 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:51:03,425 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T21:51:03,425 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:51:03,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:51:03,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:51:03,432 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store 2024-12-04T21:51:03,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:51:03,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:51:03,438 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:03,438 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:51:03,438 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:03,438 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:03,438 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:51:03,438 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:03,438 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T21:51:03,438 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733349063438Disabling compacts and flushes for region at 1733349063438Disabling writes for close at 1733349063438Writing region close event to WAL at 1733349063438Closed at 1733349063438 2024-12-04T21:51:03,439 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/.initializing 2024-12-04T21:51:03,439 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/WALs/bb3046a53f79,33977,1733349063328 2024-12-04T21:51:03,442 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C33977%2C1733349063328, suffix=, logDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/WALs/bb3046a53f79,33977,1733349063328, archiveDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/oldWALs, maxLogs=10 2024-12-04T21:51:03,442 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C33977%2C1733349063328.1733349063442 2024-12-04T21:51:03,446 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/WALs/bb3046a53f79,33977,1733349063328/bb3046a53f79%2C33977%2C1733349063328.1733349063442 2024-12-04T21:51:03,449 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34071:34071),(127.0.0.1/127.0.0.1:37757:37757)] 2024-12-04T21:51:03,450 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:51:03,450 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:03,451 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,451 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,454 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T21:51:03,454 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:03,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:03,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T21:51:03,456 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:03,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:51:03,457 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T21:51:03,458 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:03,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:51:03,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,459 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T21:51:03,460 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:03,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:51:03,460 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,461 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,461 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,462 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,462 DEBUG [master/bb3046a53f79:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,463 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T21:51:03,464 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:03,465 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:51:03,466 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729660, jitterRate=-0.07219022512435913}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T21:51:03,466 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733349063451Initializing all the Stores at 1733349063451Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349063451Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349063453 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349063453Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349063453Cleaning up temporary data from old regions at 1733349063462 (+9 ms)Region opened successfully at 1733349063466 (+4 ms) 2024-12-04T21:51:03,466 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T21:51:03,469 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eb15d21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:51:03,470 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T21:51:03,470 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T21:51:03,470 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T21:51:03,470 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T21:51:03,471 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T21:51:03,471 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T21:51:03,471 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T21:51:03,473 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T21:51:03,474 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T21:51:03,474 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T21:51:03,474 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T21:51:03,475 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T21:51:03,476 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T21:51:03,476 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T21:51:03,477 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T21:51:03,477 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T21:51:03,478 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T21:51:03,479 DEBUG 
[master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T21:51:03,480 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T21:51:03,481 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T21:51:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:51:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:51:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,482 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bb3046a53f79,33977,1733349063328, sessionid=0x100a73716ff0000, setting cluster-up flag (Was=false) 2024-12-04T21:51:03,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,486 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T21:51:03,487 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,33977,1733349063328 2024-12-04T21:51:03,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,492 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T21:51:03,493 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,33977,1733349063328 2024-12-04T21:51:03,494 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T21:51:03,495 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T21:51:03,495 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T21:51:03,495 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T21:51:03,496 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bb3046a53f79,33977,1733349063328 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T21:51:03,497 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:51:03,497 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:51:03,497 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:51:03,497 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:51:03,497 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bb3046a53f79:0, corePoolSize=10, maxPoolSize=10 2024-12-04T21:51:03,497 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,497 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:51:03,497 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T21:51:03,498 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733349093498 2024-12-04T21:51:03,498 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T21:51:03,498 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T21:51:03,498 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T21:51:03,498 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T21:51:03,498 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T21:51:03,498 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T21:51:03,498 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:51:03,498 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,499 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T21:51:03,499 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T21:51:03,499 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T21:51:03,499 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T21:51:03,499 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T21:51:03,499 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T21:51:03,499 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349063499,5,FailOnTimeoutGroup] 2024-12-04T21:51:03,499 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349063499,5,FailOnTimeoutGroup] 2024-12-04T21:51:03,499 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,499 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T21:51:03,499 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,499 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,499 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:03,500 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T21:51:03,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:51:03,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:51:03,506 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T21:51:03,506 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd 2024-12-04T21:51:03,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:51:03,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:51:03,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:03,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:51:03,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:51:03,515 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:03,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:03,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:51:03,517 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:51:03,517 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:03,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:03,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:51:03,519 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:51:03,519 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:03,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:03,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:51:03,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:51:03,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:03,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:03,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:51:03,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740 2024-12-04T21:51:03,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740 2024-12-04T21:51:03,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:51:03,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:51:03,524 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-04T21:51:03,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:51:03,528 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:51:03,528 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777372, jitterRate=-0.01152084767818451}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:51:03,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733349063512Initializing all the Stores at 1733349063513 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349063513Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349063513Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349063513Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349063513Cleaning up temporary data from old regions at 1733349063524 (+11 ms)Region opened successfully at 1733349063529 (+5 ms) 2024-12-04T21:51:03,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:51:03,529 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:51:03,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:51:03,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:51:03,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:51:03,529 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:51:03,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733349063529Disabling compacts and flushes for region at 1733349063529Disabling writes for close at 1733349063529Writing region 
close event to WAL at 1733349063529Closed at 1733349063529 2024-12-04T21:51:03,530 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:51:03,531 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T21:51:03,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T21:51:03,532 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:51:03,533 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T21:51:03,584 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(746): ClusterId : cbd56800-b9e9-4e7d-9756-53b27a9fb7d0 2024-12-04T21:51:03,585 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T21:51:03,589 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T21:51:03,589 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T21:51:03,593 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T21:51:03,594 DEBUG [RS:0;bb3046a53f79:33309 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@628b87e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:51:03,609 DEBUG [RS:0;bb3046a53f79:33309 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bb3046a53f79:33309 2024-12-04T21:51:03,609 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T21:51:03,609 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T21:51:03,609 DEBUG [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-04T21:51:03,609 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(2659): reportForDuty to master=bb3046a53f79,33977,1733349063328 with port=33309, startcode=1733349063370 2024-12-04T21:51:03,610 DEBUG [RS:0;bb3046a53f79:33309 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T21:51:03,611 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52417, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T21:51:03,611 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33977 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bb3046a53f79,33309,1733349063370 2024-12-04T21:51:03,611 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33977 {}] master.ServerManager(517): Registering regionserver=bb3046a53f79,33309,1733349063370 2024-12-04T21:51:03,613 DEBUG [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd 2024-12-04T21:51:03,613 DEBUG [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38849 2024-12-04T21:51:03,613 DEBUG [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T21:51:03,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:51:03,614 DEBUG [RS:0;bb3046a53f79:33309 {}] zookeeper.ZKUtil(111): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bb3046a53f79,33309,1733349063370 2024-12-04T21:51:03,614 WARN [RS:0;bb3046a53f79:33309 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T21:51:03,615 INFO [RS:0;bb3046a53f79:33309 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:51:03,615 DEBUG [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370 2024-12-04T21:51:03,615 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bb3046a53f79,33309,1733349063370] 2024-12-04T21:51:03,617 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T21:51:03,618 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T21:51:03,619 INFO [RS:0;bb3046a53f79:33309 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T21:51:03,619 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-04T21:51:03,619 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T21:51:03,620 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T21:51:03,620 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:51:03,620 DEBUG [RS:0;bb3046a53f79:33309 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:51:03,621 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-04T21:51:03,621 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,621 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,621 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,621 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,621 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,33309,1733349063370-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:51:03,635 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T21:51:03,635 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,33309,1733349063370-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,636 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,636 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.Replication(171): bb3046a53f79,33309,1733349063370 started 2024-12-04T21:51:03,646 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:03,646 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(1482): Serving as bb3046a53f79,33309,1733349063370, RpcServer on bb3046a53f79/172.17.0.2:33309, sessionid=0x100a73716ff0001 2024-12-04T21:51:03,646 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T21:51:03,646 DEBUG [RS:0;bb3046a53f79:33309 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bb3046a53f79,33309,1733349063370 2024-12-04T21:51:03,646 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,33309,1733349063370' 2024-12-04T21:51:03,646 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T21:51:03,647 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T21:51:03,647 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T21:51:03,648 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T21:51:03,648 DEBUG [RS:0;bb3046a53f79:33309 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bb3046a53f79,33309,1733349063370 2024-12-04T21:51:03,648 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,33309,1733349063370' 2024-12-04T21:51:03,648 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T21:51:03,648 DEBUG 
[RS:0;bb3046a53f79:33309 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T21:51:03,648 DEBUG [RS:0;bb3046a53f79:33309 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T21:51:03,648 INFO [RS:0;bb3046a53f79:33309 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T21:51:03,648 INFO [RS:0;bb3046a53f79:33309 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T21:51:03,683 WARN [bb3046a53f79:33977 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T21:51:03,753 INFO [RS:0;bb3046a53f79:33309 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C33309%2C1733349063370, suffix=, logDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370, archiveDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/oldWALs, maxLogs=32 2024-12-04T21:51:03,754 INFO [RS:0;bb3046a53f79:33309 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C33309%2C1733349063370.1733349063754 2024-12-04T21:51:03,761 INFO [RS:0;bb3046a53f79:33309 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349063754 2024-12-04T21:51:03,765 DEBUG [RS:0;bb3046a53f79:33309 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37757:37757),(127.0.0.1/127.0.0.1:34071:34071)] 2024-12-04T21:51:03,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:03,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:03,933 DEBUG [bb3046a53f79:33977 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T21:51:03,934 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bb3046a53f79,33309,1733349063370 2024-12-04T21:51:03,936 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,33309,1733349063370, state=OPENING 2024-12-04T21:51:03,938 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T21:51:03,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:03,941 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:51:03,941 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:51:03,941 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:51:03,942 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,33309,1733349063370}] 2024-12-04T21:51:04,098 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T21:51:04,103 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50897, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T21:51:04,110 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T21:51:04,110 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:51:04,113 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C33309%2C1733349063370.meta, suffix=.meta, logDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370, archiveDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/oldWALs, maxLogs=32 2024-12-04T21:51:04,113 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C33309%2C1733349063370.meta.1733349064113.meta 2024-12-04T21:51:04,123 INFO 
[RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.meta.1733349064113.meta 2024-12-04T21:51:04,126 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37757:37757),(127.0.0.1/127.0.0.1:34071:34071)] 2024-12-04T21:51:04,127 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:51:04,127 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T21:51:04,127 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T21:51:04,127 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-04T21:51:04,127 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T21:51:04,127 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:04,127 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T21:51:04,127 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T21:51:04,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:51:04,129 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:51:04,129 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:04,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:04,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:51:04,131 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:51:04,131 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:04,131 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:04,131 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:51:04,132 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:51:04,132 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:04,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:04,132 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:51:04,133 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:51:04,133 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:04,133 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:04,133 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:51:04,134 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740 2024-12-04T21:51:04,135 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740 2024-12-04T21:51:04,136 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:51:04,136 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:51:04,136 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-04T21:51:04,137 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:51:04,138 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797706, jitterRate=0.01433590054512024}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:51:04,138 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T21:51:04,139 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733349064127Writing region info on filesystem at 1733349064127Initializing all the Stores at 1733349064128 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349064128Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349064128Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349064128Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349064128Cleaning up temporary data from old regions at 1733349064136 (+8 ms)Running coprocessor post-open hooks at 1733349064138 (+2 ms)Region opened successfully at 1733349064139 (+1 ms) 2024-12-04T21:51:04,140 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733349064098 2024-12-04T21:51:04,142 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T21:51:04,142 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T21:51:04,143 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=bb3046a53f79,33309,1733349063370 2024-12-04T21:51:04,144 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,33309,1733349063370, state=OPEN 2024-12-04T21:51:04,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:51:04,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:51:04,146 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bb3046a53f79,33309,1733349063370 2024-12-04T21:51:04,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:51:04,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:51:04,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T21:51:04,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,33309,1733349063370 in 205 msec 2024-12-04T21:51:04,152 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T21:51:04,152 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 618 msec 2024-12-04T21:51:04,153 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:51:04,153 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T21:51:04,154 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:51:04,154 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,33309,1733349063370, seqNum=-1] 2024-12-04T21:51:04,154 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:51:04,155 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36405, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:51:04,160 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 665 msec 2024-12-04T21:51:04,161 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733349064160, completionTime=-1 2024-12-04T21:51:04,161 INFO 
[master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T21:51:04,161 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T21:51:04,162 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T21:51:04,162 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733349124162 2024-12-04T21:51:04,162 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733349184162 2024-12-04T21:51:04,162 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-04T21:51:04,163 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,33977,1733349063328-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:04,163 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,33977,1733349063328-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:04,163 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,33977,1733349063328-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:04,163 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bb3046a53f79:33977, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:04,163 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:04,163 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:04,164 DEBUG [master/bb3046a53f79:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T21:51:04,166 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.772sec 2024-12-04T21:51:04,166 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T21:51:04,166 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T21:51:04,166 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T21:51:04,166 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-04T21:51:04,166 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T21:51:04,166 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,33977,1733349063328-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:51:04,166 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,33977,1733349063328-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T21:51:04,168 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T21:51:04,168 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T21:51:04,168 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,33977,1733349063328-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:04,183 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e64a5a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:51:04,183 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bb3046a53f79,33977,-1 for getting cluster id 2024-12-04T21:51:04,183 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T21:51:04,186 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cbd56800-b9e9-4e7d-9756-53b27a9fb7d0' 2024-12-04T21:51:04,186 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T21:51:04,187 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cbd56800-b9e9-4e7d-9756-53b27a9fb7d0" 2024-12-04T21:51:04,187 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5610732e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:51:04,187 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bb3046a53f79,33977,-1] 2024-12-04T21:51:04,187 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T21:51:04,187 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:51:04,189 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49170, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T21:51:04,190 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4887e6c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:51:04,191 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:51:04,192 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,33309,1733349063370, seqNum=-1] 2024-12-04T21:51:04,192 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:51:04,194 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60880, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:51:04,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bb3046a53f79,33977,1733349063328 2024-12-04T21:51:04,196 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:04,199 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T21:51:04,200 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T21:51:04,201 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is bb3046a53f79,33977,1733349063328 2024-12-04T21:51:04,201 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4b7aec62 2024-12-04T21:51:04,201 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T21:51:04,202 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49184, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T21:51:04,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T21:51:04,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-04T21:51:04,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:51:04,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T21:51:04,207 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T21:51:04,207 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:04,207 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-04T21:51:04,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:51:04,208 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T21:51:04,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741835_1011 (size=405) 2024-12-04T21:51:04,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741835_1011 (size=405) 2024-12-04T21:51:04,217 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e9202790fd5e198d06c7429ce2832402, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd 2024-12-04T21:51:04,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741836_1012 (size=88) 2024-12-04T21:51:04,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40085 is added to blk_1073741836_1012 (size=88) 2024-12-04T21:51:04,226 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:04,226 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing e9202790fd5e198d06c7429ce2832402, disabling compactions & flushes 2024-12-04T21:51:04,226 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:04,226 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:04,226 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. after waiting 0 ms 2024-12-04T21:51:04,226 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:04,226 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:04,226 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for e9202790fd5e198d06c7429ce2832402: Waiting for close lock at 1733349064226Disabling compacts and flushes for region at 1733349064226Disabling writes for close at 1733349064226Writing region close event to WAL at 1733349064226Closed at 1733349064226 2024-12-04T21:51:04,228 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T21:51:04,228 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733349064228"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733349064228"}]},"ts":"1733349064228"} 2024-12-04T21:51:04,230 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-04T21:51:04,231 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T21:51:04,232 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733349064231"}]},"ts":"1733349064231"} 2024-12-04T21:51:04,234 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-04T21:51:04,234 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e9202790fd5e198d06c7429ce2832402, ASSIGN}] 2024-12-04T21:51:04,236 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e9202790fd5e198d06c7429ce2832402, ASSIGN 2024-12-04T21:51:04,237 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e9202790fd5e198d06c7429ce2832402, ASSIGN; state=OFFLINE, location=bb3046a53f79,33309,1733349063370; forceNewPlan=false, retain=false 2024-12-04T21:51:04,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:04,388 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e9202790fd5e198d06c7429ce2832402, regionState=OPENING, regionLocation=bb3046a53f79,33309,1733349063370 2024-12-04T21:51:04,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e9202790fd5e198d06c7429ce2832402, ASSIGN because future has completed 2024-12-04T21:51:04,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e9202790fd5e198d06c7429ce2832402, server=bb3046a53f79,33309,1733349063370}] 2024-12-04T21:51:04,555 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:04,555 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e9202790fd5e198d06c7429ce2832402, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:51:04,556 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,556 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:04,556 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,556 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,559 INFO [StoreOpener-e9202790fd5e198d06c7429ce2832402-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,561 INFO 
[StoreOpener-e9202790fd5e198d06c7429ce2832402-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9202790fd5e198d06c7429ce2832402 columnFamilyName info 2024-12-04T21:51:04,561 DEBUG [StoreOpener-e9202790fd5e198d06c7429ce2832402-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:04,562 INFO [StoreOpener-e9202790fd5e198d06c7429ce2832402-1 {}] regionserver.HStore(327): Store=e9202790fd5e198d06c7429ce2832402/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:51:04,562 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,563 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,564 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,565 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,565 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,568 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,572 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:51:04,573 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e9202790fd5e198d06c7429ce2832402; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846696, 
jitterRate=0.07662974298000336}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T21:51:04,573 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:04,575 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e9202790fd5e198d06c7429ce2832402: Running coprocessor pre-open hook at 1733349064556Writing region info on filesystem at 1733349064556Initializing all the Stores at 1733349064558 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349064558Cleaning up temporary data from old regions at 1733349064565 (+7 ms)Running coprocessor post-open hooks at 1733349064574 (+9 ms)Region opened successfully at 1733349064574 2024-12-04T21:51:04,576 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402., pid=6, masterSystemTime=1733349064547 2024-12-04T21:51:04,579 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:04,579 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 
2024-12-04T21:51:04,580 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e9202790fd5e198d06c7429ce2832402, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,33309,1733349063370 2024-12-04T21:51:04,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e9202790fd5e198d06c7429ce2832402, server=bb3046a53f79,33309,1733349063370 because future has completed 2024-12-04T21:51:04,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T21:51:04,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e9202790fd5e198d06c7429ce2832402, server=bb3046a53f79,33309,1733349063370 in 192 msec 2024-12-04T21:51:04,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T21:51:04,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e9202790fd5e198d06c7429ce2832402, ASSIGN in 355 msec 2024-12-04T21:51:04,594 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T21:51:04,594 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733349064594"}]},"ts":"1733349064594"} 2024-12-04T21:51:04,596 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-04T21:51:04,597 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T21:51:04,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 393 msec 2024-12-04T21:51:04,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T21:51:04,832 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-04T21:51:04,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-04T21:51:04,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
[The same WARN with an identical stack trace is logged again, cycling through this WAL, hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta and hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193, at 21:51:04,926, 21:51:05,354, 21:51:05,924, 21:51:05,927, 21:51:06,355, 21:51:06,924, 21:51:06,928, 21:51:07,355, 21:51:07,926, 21:51:07,929, 21:51:08,356, 21:51:08,927, 21:51:08,930 and 21:51:09,357.]
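The repeating warning above comes from a reflective call: the trace shows java.lang.reflect.Method.invoke leading into RecoverLeaseFSUtils.isFileClosed, and the real failure (java.io.IOException: Filesystem closed from DFSClient.checkOpen) surfaces only as the cause of an InvocationTargetException. The following minimal, self-contained sketch (the ClosedFs class and the sample path are hypothetical stand-ins, not HBase or HDFS code) reproduces that wrapping behaviour, which is standard java.lang.reflect semantics:

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Hypothetical stand-in for a filesystem whose client has already been shut down:
// isFileClosed(...) fails the same way DFSClient.checkOpen() does in the trace above.
class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }
}

public class ReflectiveInvokeDemo {
    public static void main(String[] args) throws Exception {
        ClosedFs fs = new ClosedFs();
        // Look up and invoke the method reflectively, mirroring the Method.invoke
        // frame that appears in every one of the repeated stack traces.
        Method isFileClosed = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(fs, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // Reflection wraps the target's exception, so the log line reads
            // "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed".
            System.out.println("wrapper: " + e);            // the wrapper has no message of its own
            System.out.println("cause:   " + e.getCause()); // the underlying IOException
        }
    }
}
```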
2024-12-04T21:51:09,632 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-04T21:51:09,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[The same FsDatasetImpl(779) WARN is logged 15 more times between 21:51:09,635 and 21:51:09,660.]
2024-12-04T21:51:09,665 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-04T21:51:09,665 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
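The "Failed invocation" warnings resume below and keep appearing roughly once per second for each of the three WAL files until the excerpt ends. That cadence is consistent with a poll loop that re-probes each file and logs every failed attempt; the sketch below is a generic illustration of such a pattern under that assumption (ClosedProbe, LeasePollSketch and the sample paths are hypothetical, not the RecoverLeaseFSUtils implementation):

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

// Hypothetical probe standing in for the reflective isFileClosed() call seen in the traces.
interface ClosedProbe {
    boolean isFileClosed(String path) throws IOException;
}

public class LeasePollSketch {

    // Polls each WAL file until it is reported closed or the deadline passes,
    // logging one warning per failed probe -- the shape of the repeating
    // "Failed invocation" entries in this excerpt.
    static void waitForClose(ClosedProbe probe, List<String> walFiles, long timeoutMs)
            throws InterruptedException {
        List<String> pending = new ArrayList<>(walFiles);
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!pending.isEmpty() && System.currentTimeMillis() < deadline) {
            for (Iterator<String> it = pending.iterator(); it.hasNext(); ) {
                String wal = it.next();
                try {
                    if (probe.isFileClosed(wal)) {
                        it.remove(); // lease recovered; stop polling this file
                    }
                } catch (IOException e) {
                    // With the client already shut down, every probe fails the same way,
                    // so the same warning repeats for each file on every polling pass.
                    System.err.println("WARN Failed invocation for " + wal + ": " + e);
                }
            }
            Thread.sleep(1000); // roughly the ~1 s cadence visible in the timestamps
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // A probe that always fails, as in the excerpt where the filesystem is closed.
        ClosedProbe alwaysClosedClient = path -> { throw new IOException("Filesystem closed"); };
        waitForClose(alwaysClosedClient, List.of("/WALs/rs1/wal.1", "/WALs/rs2/wal.meta"), 3000);
    }
}
```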
[The RecoverLeaseFSUtils(258) "Failed invocation" WARN, with the identical InvocationTargetException / "Filesystem closed" stack trace, continues for the same three WAL files at 21:51:09,928, 21:51:09,930, 21:51:10,358, 21:51:10,929, 21:51:10,931, 21:51:11,359, 21:51:11,930, 21:51:11,932, 21:51:12,360, 21:51:12,931, 21:51:12,933, 21:51:13,360 and 21:51:13,932.]
2024-12-04T21:51:13,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:14,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:51:14,232 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-04T21:51:14,232 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-04T21:51:14,235 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T21:51:14,235 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 
2024-12-04T21:51:14,238 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402., hostname=bb3046a53f79,33309,1733349063370, seqNum=2] 2024-12-04T21:51:14,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T21:51:14,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T21:51:14,249 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-04T21:51:14,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-04T21:51:14,251 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-04T21:51:14,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-04T21:51:14,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:14,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33309 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-04T21:51:14,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:14,421 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing e9202790fd5e198d06c7429ce2832402 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-04T21:51:14,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/1a373b6745bb40b4b041d205c9a0427b is 1080, key is row0001/info:/1733349074239/Put/seqid=0 2024-12-04T21:51:14,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741837_1013 (size=6033) 2024-12-04T21:51:14,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741837_1013 (size=6033) 2024-12-04T21:51:14,440 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/1a373b6745bb40b4b041d205c9a0427b 2024-12-04T21:51:14,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/1a373b6745bb40b4b041d205c9a0427b as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/1a373b6745bb40b4b041d205c9a0427b 2024-12-04T21:51:14,451 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/1a373b6745bb40b4b041d205c9a0427b, entries=1, sequenceid=5, filesize=5.9 K 2024-12-04T21:51:14,453 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e9202790fd5e198d06c7429ce2832402 in 31ms, sequenceid=5, compaction requested=false 2024-12-04T21:51:14,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for e9202790fd5e198d06c7429ce2832402: 2024-12-04T21:51:14,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:14,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-04T21:51:14,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-04T21:51:14,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-04T21:51:14,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 205 msec 2024-12-04T21:51:14,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 215 msec 2024-12-04T21:51:14,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:14,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:15,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:15,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:15,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:16,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:16,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:16,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:17,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:17,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 after 68071ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:51:17,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:17,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:18,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:18,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:18,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:19,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:19,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:19,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:20,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:20,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:20,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:21,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:21,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:21,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:22,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:22,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:22,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:23,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:23,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:23,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-04T21:51:24,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-04T21:51:24,283 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-04T21:51:24,291 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T21:51:24,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T21:51:24,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-04T21:51:24,296 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-04T21:51:24,297 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-04T21:51:24,297 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-04T21:51:24,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T21:51:24,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33309 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-12-04T21:51:24,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.
2024-12-04T21:51:24,453 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing e9202790fd5e198d06c7429ce2832402 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-04T21:51:24,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/87cb2e0718b84ba3b3ba20f4539a7de6 is 1080, key is row0002/info:/1733349084287/Put/seqid=0
2024-12-04T21:51:24,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741838_1014 (size=6033)
2024-12-04T21:51:24,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741838_1014 (size=6033)
2024-12-04T21:51:24,467 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/87cb2e0718b84ba3b3ba20f4539a7de6
2024-12-04T21:51:24,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/87cb2e0718b84ba3b3ba20f4539a7de6 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/87cb2e0718b84ba3b3ba20f4539a7de6
2024-12-04T21:51:24,480 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/87cb2e0718b84ba3b3ba20f4539a7de6, entries=1, sequenceid=9, filesize=5.9 K
2024-12-04T21:51:24,481 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e9202790fd5e198d06c7429ce2832402 in 28ms, sequenceid=9, compaction requested=false
2024-12-04T21:51:24,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for e9202790fd5e198d06c7429ce2832402:
2024-12-04T21:51:24,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.
2024-12-04T21:51:24,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-12-04T21:51:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-12-04T21:51:24,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-12-04T21:51:24,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec
2024-12-04T21:51:24,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec
2024-12-04T21:51:24,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:24,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-04T21:51:24,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta after 68060ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-04T21:51:24,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 after 68074ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-04T21:51:25,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:25,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:25,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:26,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:26,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:26,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:27,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:27,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:27,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:28,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:28,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:28,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:29,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:29,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:29,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:30,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:30,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:30,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:31,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:31,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:31,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:32,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:32,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:32,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:33,310 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T21:51:33,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:33,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:33,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:34,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-04T21:51:34,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-04T21:51:34,394 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-04T21:51:34,401 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C33309%2C1733349063370.1733349094401
2024-12-04T21:51:34,410 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:34,410 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:34,410 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:34,410 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:34,411 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:34,411 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349063754 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349094401
2024-12-04T21:51:34,412 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37757:37757),(127.0.0.1/127.0.0.1:34071:34071)]
2024-12-04T21:51:34,412 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349063754 is not closed yet, will try archiving it next time
2024-12-04T21:51:34,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T21:51:34,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741833_1009 (size=5546)
2024-12-04T21:51:34,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741833_1009 (size=5546)
2024-12-04T21:51:34,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T21:51:34,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-04T21:51:34,417 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-04T21:51:34,418 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-04T21:51:34,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-04T21:51:34,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33309 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-04T21:51:34,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.
2024-12-04T21:51:34,574 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing e9202790fd5e198d06c7429ce2832402 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-04T21:51:34,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/ffe74f4432844068b36a4293a878f4eb is 1080, key is row0003/info:/1733349094397/Put/seqid=0
2024-12-04T21:51:34,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741840_1016 (size=6033)
2024-12-04T21:51:34,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741840_1016 (size=6033)
2024-12-04T21:51:34,591 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/ffe74f4432844068b36a4293a878f4eb
2024-12-04T21:51:34,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/ffe74f4432844068b36a4293a878f4eb as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/ffe74f4432844068b36a4293a878f4eb
2024-12-04T21:51:34,605 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/ffe74f4432844068b36a4293a878f4eb, entries=1, sequenceid=13, filesize=5.9 K
2024-12-04T21:51:34,607 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e9202790fd5e198d06c7429ce2832402 in 32ms, sequenceid=13, compaction requested=true
2024-12-04T21:51:34,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for e9202790fd5e198d06c7429ce2832402:
2024-12-04T21:51:34,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.
2024-12-04T21:51:34,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-04T21:51:34,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-04T21:51:34,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-04T21:51:34,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 189 msec
2024-12-04T21:51:34,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 197 msec
2024-12-04T21:51:34,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:34,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:35,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:35,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:35,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:36,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:36,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:36,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:37,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:37,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:37,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:38,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:38,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:38,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:39,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:39,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:39,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:40,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:40,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:40,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:41,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:41,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:41,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:42,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:42,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:42,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:43,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:43,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:43,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:44,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-04T21:51:44,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-04T21:51:44,512 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-04T21:51:44,512 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-04T21:51:44,513 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-04T21:51:44,513 DEBUG [Time-limited test {}] regionserver.HStore(1541): e9202790fd5e198d06c7429ce2832402/info is initiating minor compaction (all files)
2024-12-04T21:51:44,514 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-04T21:51:44,514 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-04T21:51:44,514 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of e9202790fd5e198d06c7429ce2832402/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.
2024-12-04T21:51:44,514 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/1a373b6745bb40b4b041d205c9a0427b, hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/87cb2e0718b84ba3b3ba20f4539a7de6, hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/ffe74f4432844068b36a4293a878f4eb] into tmpdir=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp, totalSize=17.7 K
2024-12-04T21:51:44,515 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1a373b6745bb40b4b041d205c9a0427b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733349074239
2024-12-04T21:51:44,515 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 87cb2e0718b84ba3b3ba20f4539a7de6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733349084287
2024-12-04T21:51:44,516 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ffe74f4432844068b36a4293a878f4eb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733349094397
2024-12-04T21:51:44,528 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): e9202790fd5e198d06c7429ce2832402#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-04T21:51:44,529 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/23cb1a68f0134dbbb10d65fadc33b586 is 1080, key is row0001/info:/1733349074239/Put/seqid=0
2024-12-04T21:51:44,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741841_1017 (size=8296)
2024-12-04T21:51:44,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741841_1017 (size=8296)
2024-12-04T21:51:44,542 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/23cb1a68f0134dbbb10d65fadc33b586 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/23cb1a68f0134dbbb10d65fadc33b586
2024-12-04T21:51:44,549 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e9202790fd5e198d06c7429ce2832402/info of e9202790fd5e198d06c7429ce2832402 into 23cb1a68f0134dbbb10d65fadc33b586(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-04T21:51:44,549 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for e9202790fd5e198d06c7429ce2832402:
2024-12-04T21:51:44,551 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C33309%2C1733349063370.1733349104551
2024-12-04T21:51:44,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:44,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:44,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:44,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:44,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:51:44,557 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349094401 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349104551
2024-12-04T21:51:44,558 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34071:34071),(127.0.0.1/127.0.0.1:37757:37757)]
2024-12-04T21:51:44,558 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349094401 is not closed yet, will try archiving it next time
2024-12-04T21:51:44,558 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349063754 to hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/oldWALs/bb3046a53f79%2C33309%2C1733349063370.1733349063754
2024-12-04T21:51:44,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741839_1015 (size=2520)
2024-12-04T21:51:44,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T21:51:44,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741839_1015 (size=2520)
2024-12-04T21:51:44,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-04T21:51:44,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-04T21:51:44,561 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-04T21:51:44,562 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-04T21:51:44,562 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-04T21:51:44,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33309 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-04T21:51:44,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.
2024-12-04T21:51:44,717 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing e9202790fd5e198d06c7429ce2832402 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-04T21:51:44,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/fa97cd9ca01d440bb8de93acbd33f854 is 1080, key is row0000/info:/1733349104550/Put/seqid=0
2024-12-04T21:51:44,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741843_1019 (size=6033)
2024-12-04T21:51:44,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741843_1019 (size=6033)
2024-12-04T21:51:44,732 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/fa97cd9ca01d440bb8de93acbd33f854
2024-12-04T21:51:44,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/fa97cd9ca01d440bb8de93acbd33f854 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/fa97cd9ca01d440bb8de93acbd33f854
2024-12-04T21:51:44,742 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/fa97cd9ca01d440bb8de93acbd33f854, entries=1, sequenceid=18, filesize=5.9 K
2024-12-04T21:51:44,744 INFO [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e9202790fd5e198d06c7429ce2832402 in 26ms, sequenceid=18, compaction requested=false
2024-12-04T21:51:44,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for e9202790fd5e198d06c7429ce2832402:
2024-12-04T21:51:44,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.
2024-12-04T21:51:44,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-04T21:51:44,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-04T21:51:44,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-04T21:51:44,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec 2024-12-04T21:51:44,751 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec 2024-12-04T21:51:44,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:44,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:45,008 INFO [master/bb3046a53f79:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-04T21:51:45,008 INFO [master/bb3046a53f79:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-04T21:51:45,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:45,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:45,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:46,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:46,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:46,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:47,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:47,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:47,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:48,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:48,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:48,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:49,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:49,556 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e9202790fd5e198d06c7429ce2832402, had cached 0 bytes from a total of 14329 2024-12-04T21:51:49,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:49,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:50,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:50,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:50,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:51,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:51,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:51,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:52,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:52,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:52,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:53,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:53,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:53,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:54,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:54,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-04T21:51:54,583 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-04T21:51:54,591 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C33309%2C1733349063370.1733349114591 2024-12-04T21:51:54,599 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,599 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,600 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,600 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,600 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,600 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349104551 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349114591 2024-12-04T21:51:54,601 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34071:34071),(127.0.0.1/127.0.0.1:37757:37757)] 2024-12-04T21:51:54,601 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349104551 is not closed 
yet, will try archiving it next time 2024-12-04T21:51:54,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T21:51:54,601 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/WALs/bb3046a53f79,33309,1733349063370/bb3046a53f79%2C33309%2C1733349063370.1733349094401 to hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/oldWALs/bb3046a53f79%2C33309%2C1733349063370.1733349094401 2024-12-04T21:51:54,601 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T21:51:54,601 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:51:54,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:51:54,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:51:54,602 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-04T21:51:54,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741842_1018 (size=2026) 2024-12-04T21:51:54,602 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T21:51:54,602 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1198786032, stopped=false 2024-12-04T21:51:54,602 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bb3046a53f79,33977,1733349063328 2024-12-04T21:51:54,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741842_1018 (size=2026) 2024-12-04T21:51:54,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:51:54,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:51:54,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:54,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:54,604 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:51:54,604 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T21:51:54,604 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:51:54,605 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:51:54,605 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:51:54,605 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bb3046a53f79,33309,1733349063370' ***** 2024-12-04T21:51:54,605 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T21:51:54,605 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:51:54,605 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T21:51:54,605 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T21:51:54,605 INFO [RS:0;bb3046a53f79:33309 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T21:51:54,605 INFO [RS:0;bb3046a53f79:33309 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T21:51:54,605 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(3091): Received CLOSE for e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:54,605 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(959): stopping server bb3046a53f79,33309,1733349063370 2024-12-04T21:51:54,606 INFO [RS:0;bb3046a53f79:33309 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:51:54,606 INFO [RS:0;bb3046a53f79:33309 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bb3046a53f79:33309. 2024-12-04T21:51:54,606 DEBUG [RS:0;bb3046a53f79:33309 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:51:54,606 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e9202790fd5e198d06c7429ce2832402, disabling compactions & flushes 2024-12-04T21:51:54,606 DEBUG [RS:0;bb3046a53f79:33309 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:51:54,606 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:54,606 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:54,606 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T21:51:54,606 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T21:51:54,606 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. after waiting 0 ms 2024-12-04T21:51:54,606 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T21:51:54,606 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:54,606 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T21:51:54,606 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing e9202790fd5e198d06c7429ce2832402 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-04T21:51:54,606 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-04T21:51:54,606 DEBUG [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(1325): Online Regions={e9202790fd5e198d06c7429ce2832402=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402., 1588230740=hbase:meta,,1.1588230740} 2024-12-04T21:51:54,606 DEBUG [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e9202790fd5e198d06c7429ce2832402 2024-12-04T21:51:54,606 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:51:54,606 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:51:54,606 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:51:54,606 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:51:54,606 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:51:54,606 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-04T21:51:54,610 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/c47cd7ac514b45adbfb3419381c33216 is 1080, key is row0001/info:/1733349114586/Put/seqid=0 2024-12-04T21:51:54,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741845_1021 (size=6033) 2024-12-04T21:51:54,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741845_1021 (size=6033) 2024-12-04T21:51:54,615 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/c47cd7ac514b45adbfb3419381c33216 2024-12-04T21:51:54,623 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/.tmp/info/c47cd7ac514b45adbfb3419381c33216 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/c47cd7ac514b45adbfb3419381c33216 2024-12-04T21:51:54,624 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/.tmp/info/5be88772c2464c6bb69a059a83ed2df6 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402./info:regioninfo/1733349064580/Put/seqid=0 2024-12-04T21:51:54,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741846_1022 (size=7308) 2024-12-04T21:51:54,629 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/c47cd7ac514b45adbfb3419381c33216, entries=1, sequenceid=22, filesize=5.9 K 2024-12-04T21:51:54,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741846_1022 (size=7308) 2024-12-04T21:51:54,630 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/.tmp/info/5be88772c2464c6bb69a059a83ed2df6 2024-12-04T21:51:54,630 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize 
~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e9202790fd5e198d06c7429ce2832402 in 24ms, sequenceid=22, compaction requested=true 2024-12-04T21:51:54,630 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/1a373b6745bb40b4b041d205c9a0427b, hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/87cb2e0718b84ba3b3ba20f4539a7de6, hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/ffe74f4432844068b36a4293a878f4eb] to archive 2024-12-04T21:51:54,631 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T21:51:54,633 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/1a373b6745bb40b4b041d205c9a0427b to hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/1a373b6745bb40b4b041d205c9a0427b 2024-12-04T21:51:54,634 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/87cb2e0718b84ba3b3ba20f4539a7de6 to hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/87cb2e0718b84ba3b3ba20f4539a7de6 2024-12-04T21:51:54,635 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/ffe74f4432844068b36a4293a878f4eb to hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/info/ffe74f4432844068b36a4293a878f4eb 2024-12-04T21:51:54,635 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=bb3046a53f79:33977 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-04T21:51:54,636 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1a373b6745bb40b4b041d205c9a0427b=6033, 87cb2e0718b84ba3b3ba20f4539a7de6=6033, ffe74f4432844068b36a4293a878f4eb=6033] 2024-12-04T21:51:54,639 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e9202790fd5e198d06c7429ce2832402/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-04T21:51:54,640 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 2024-12-04T21:51:54,640 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e9202790fd5e198d06c7429ce2832402: Waiting for close lock at 1733349114606Running coprocessor pre-close hooks at 1733349114606Disabling compacts and flushes for region at 1733349114606Disabling writes for close at 1733349114606Obtaining lock to block concurrent updates at 1733349114606Preparing flush snapshotting stores in e9202790fd5e198d06c7429ce2832402 at 1733349114606Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733349114606Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. at 1733349114607 (+1 ms)Flushing e9202790fd5e198d06c7429ce2832402/info: creating writer at 1733349114607Flushing e9202790fd5e198d06c7429ce2832402/info: appending metadata at 1733349114610 (+3 ms)Flushing e9202790fd5e198d06c7429ce2832402/info: closing flushed file at 1733349114610Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b18b34b: reopening flushed file at 1733349114622 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e9202790fd5e198d06c7429ce2832402 in 24ms, sequenceid=22, compaction requested=true at 1733349114630 (+8 ms)Writing region close event to WAL at 1733349114636 (+6 ms)Running coprocessor post-close hooks at 1733349114640 (+4 ms)Closed at 1733349114640 2024-12-04T21:51:54,640 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733349064203.e9202790fd5e198d06c7429ce2832402. 
2024-12-04T21:51:54,648 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/.tmp/ns/659f6e0fd82f4215bed3b005bef82445 is 43, key is default/ns:d/1733349064156/Put/seqid=0 2024-12-04T21:51:54,650 INFO [regionserver/bb3046a53f79:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-04T21:51:54,650 INFO [regionserver/bb3046a53f79:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-04T21:51:54,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741847_1023 (size=5153) 2024-12-04T21:51:54,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741847_1023 (size=5153) 2024-12-04T21:51:54,653 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/.tmp/ns/659f6e0fd82f4215bed3b005bef82445 2024-12-04T21:51:54,670 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/.tmp/table/5a1df5fdc8dc4bfca06838b6906b6793 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733349064594/Put/seqid=0 2024-12-04T21:51:54,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741848_1024 (size=5508) 2024-12-04T21:51:54,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741848_1024 (size=5508) 2024-12-04T21:51:54,675 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/.tmp/table/5a1df5fdc8dc4bfca06838b6906b6793 2024-12-04T21:51:54,680 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/.tmp/info/5be88772c2464c6bb69a059a83ed2df6 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/info/5be88772c2464c6bb69a059a83ed2df6 2024-12-04T21:51:54,685 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/info/5be88772c2464c6bb69a059a83ed2df6, entries=10, sequenceid=11, filesize=7.1 K 2024-12-04T21:51:54,686 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/.tmp/ns/659f6e0fd82f4215bed3b005bef82445 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/ns/659f6e0fd82f4215bed3b005bef82445 2024-12-04T21:51:54,691 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/ns/659f6e0fd82f4215bed3b005bef82445, entries=2, sequenceid=11, filesize=5.0 K 2024-12-04T21:51:54,692 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/.tmp/table/5a1df5fdc8dc4bfca06838b6906b6793 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/table/5a1df5fdc8dc4bfca06838b6906b6793 2024-12-04T21:51:54,698 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/table/5a1df5fdc8dc4bfca06838b6906b6793, entries=2, sequenceid=11, filesize=5.4 K 2024-12-04T21:51:54,699 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 92ms, sequenceid=11, compaction requested=false 2024-12-04T21:51:54,703 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-04T21:51:54,703 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:51:54,703 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:51:54,703 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733349114606Running coprocessor pre-close hooks at 1733349114606Disabling compacts and flushes for region at 1733349114606Disabling writes for close at 1733349114606Obtaining lock to block concurrent updates at 1733349114606Preparing flush snapshotting stores in 1588230740 at 1733349114606Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733349114607 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733349114607Flushing 1588230740/info: creating writer at 1733349114607Flushing 1588230740/info: appending metadata at 1733349114623 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733349114623Flushing 1588230740/ns: creating writer at 1733349114635 (+12 ms)Flushing 1588230740/ns: appending metadata at 1733349114648 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733349114648Flushing 
1588230740/table: creating writer at 1733349114658 (+10 ms)Flushing 1588230740/table: appending metadata at 1733349114670 (+12 ms)Flushing 1588230740/table: closing flushed file at 1733349114670Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a6c180d: reopening flushed file at 1733349114679 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57c856a6: reopening flushed file at 1733349114685 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f457689: reopening flushed file at 1733349114691 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 92ms, sequenceid=11, compaction requested=false at 1733349114699 (+8 ms)Writing region close event to WAL at 1733349114700 (+1 ms)Running coprocessor post-close hooks at 1733349114703 (+3 ms)Closed at 1733349114703 2024-12-04T21:51:54,704 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T21:51:54,806 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(976): stopping server bb3046a53f79,33309,1733349063370; all regions closed. 2024-12-04T21:51:54,807 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,807 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,807 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,807 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,807 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741834_1010 (size=3306) 2024-12-04T21:51:54,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741834_1010 (size=3306) 2024-12-04T21:51:54,813 DEBUG [RS:0;bb3046a53f79:33309 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/oldWALs 2024-12-04T21:51:54,813 INFO [RS:0;bb3046a53f79:33309 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C33309%2C1733349063370.meta:.meta(num 1733349064113) 2024-12-04T21:51:54,813 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,813 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,813 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,814 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,814 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741844_1020 (size=1252) 2024-12-04T21:51:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741844_1020 (size=1252) 2024-12-04T21:51:54,820 DEBUG [RS:0;bb3046a53f79:33309 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/oldWALs 2024-12-04T21:51:54,820 INFO [RS:0;bb3046a53f79:33309 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C33309%2C1733349063370:(num 1733349114591) 2024-12-04T21:51:54,820 DEBUG [RS:0;bb3046a53f79:33309 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:51:54,820 INFO 
[RS:0;bb3046a53f79:33309 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:51:54,821 INFO [RS:0;bb3046a53f79:33309 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:51:54,821 INFO [RS:0;bb3046a53f79:33309 {}] hbase.ChoreService(370): Chore service for: regionserver/bb3046a53f79:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-04T21:51:54,821 INFO [RS:0;bb3046a53f79:33309 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:51:54,821 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T21:51:54,821 INFO [RS:0;bb3046a53f79:33309 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33309 2024-12-04T21:51:54,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:51:54,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bb3046a53f79,33309,1733349063370 2024-12-04T21:51:54,824 INFO [RS:0;bb3046a53f79:33309 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:51:54,825 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bb3046a53f79,33309,1733349063370] 2024-12-04T21:51:54,826 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bb3046a53f79,33309,1733349063370 already deleted, retry=false 2024-12-04T21:51:54,826 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bb3046a53f79,33309,1733349063370 expired; onlineServers=0 2024-12-04T21:51:54,826 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bb3046a53f79,33977,1733349063328' ***** 2024-12-04T21:51:54,826 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T21:51:54,827 INFO [M:0;bb3046a53f79:33977 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:51:54,827 INFO [M:0;bb3046a53f79:33977 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:51:54,827 DEBUG [M:0;bb3046a53f79:33977 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T21:51:54,827 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-04T21:51:54,827 DEBUG [M:0;bb3046a53f79:33977 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T21:51:54,827 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349063499 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349063499,5,FailOnTimeoutGroup] 2024-12-04T21:51:54,827 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349063499 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349063499,5,FailOnTimeoutGroup] 2024-12-04T21:51:54,827 INFO [M:0;bb3046a53f79:33977 {}] hbase.ChoreService(370): Chore service for: master/bb3046a53f79:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T21:51:54,827 INFO [M:0;bb3046a53f79:33977 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:51:54,827 DEBUG [M:0;bb3046a53f79:33977 {}] master.HMaster(1795): Stopping service threads 2024-12-04T21:51:54,827 INFO [M:0;bb3046a53f79:33977 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T21:51:54,827 INFO [M:0;bb3046a53f79:33977 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:51:54,828 INFO [M:0;bb3046a53f79:33977 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T21:51:54,828 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T21:51:54,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T21:51:54,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:54,829 DEBUG [M:0;bb3046a53f79:33977 {}] zookeeper.ZKUtil(347): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T21:51:54,829 WARN [M:0;bb3046a53f79:33977 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T21:51:54,829 INFO [M:0;bb3046a53f79:33977 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/.lastflushedseqids 2024-12-04T21:51:54,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:51:54,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T21:51:54,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-04T21:51:54,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741849_1025 
(size=130) 2024-12-04T21:51:54,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741849_1025 (size=130) 2024-12-04T21:51:54,834 INFO [M:0;bb3046a53f79:33977 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T21:51:54,835 INFO [M:0;bb3046a53f79:33977 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T21:51:54,835 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:51:54,835 INFO [M:0;bb3046a53f79:33977 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:54,835 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:54,835 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:51:54,835 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:54,835 INFO [M:0;bb3046a53f79:33977 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.61 KB heapSize=55.02 KB 2024-12-04T21:51:54,848 DEBUG [M:0;bb3046a53f79:33977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/254234665d914363b0719a532607a7f8 is 82, key is hbase:meta,,1/info:regioninfo/1733349064143/Put/seqid=0 2024-12-04T21:51:54,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741850_1026 (size=5672) 2024-12-04T21:51:54,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741850_1026 (size=5672) 2024-12-04T21:51:54,853 INFO [M:0;bb3046a53f79:33977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/254234665d914363b0719a532607a7f8 2024-12-04T21:51:54,871 DEBUG [M:0;bb3046a53f79:33977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a382e8c68e5450fa30bce1376c2d607 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733349064598/Put/seqid=0 2024-12-04T21:51:54,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741851_1027 (size=7825) 2024-12-04T21:51:54,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741851_1027 (size=7825) 2024-12-04T21:51:54,876 INFO [M:0;bb3046a53f79:33977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.01 KB at sequenceid=121 (bloomFilter=true), 
to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a382e8c68e5450fa30bce1376c2d607 2024-12-04T21:51:54,880 INFO [M:0;bb3046a53f79:33977 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3a382e8c68e5450fa30bce1376c2d607 2024-12-04T21:51:54,891 DEBUG [M:0;bb3046a53f79:33977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/714972340e8348ceb7ec06d130da4c30 is 69, key is bb3046a53f79,33309,1733349063370/rs:state/1733349063612/Put/seqid=0 2024-12-04T21:51:54,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741852_1028 (size=5156) 2024-12-04T21:51:54,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741852_1028 (size=5156) 2024-12-04T21:51:54,896 INFO [M:0;bb3046a53f79:33977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/714972340e8348ceb7ec06d130da4c30 2024-12-04T21:51:54,914 DEBUG [M:0;bb3046a53f79:33977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c1b887e4b5f54cd3bccffaedf12ff7d0 is 52, key is load_balancer_on/state:d/1733349064198/Put/seqid=0 2024-12-04T21:51:54,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741853_1029 (size=5056) 2024-12-04T21:51:54,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to blk_1073741853_1029 (size=5056) 2024-12-04T21:51:54,918 INFO [M:0;bb3046a53f79:33977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c1b887e4b5f54cd3bccffaedf12ff7d0 2024-12-04T21:51:54,923 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/254234665d914363b0719a532607a7f8 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/254234665d914363b0719a532607a7f8 2024-12-04T21:51:54,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:51:54,925 INFO [RS:0;bb3046a53f79:33309 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:51:54,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33309-0x100a73716ff0001, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper 
Event, type=None, state=Closed, path=null 2024-12-04T21:51:54,925 INFO [RS:0;bb3046a53f79:33309 {}] regionserver.HRegionServer(1031): Exiting; stopping=bb3046a53f79,33309,1733349063370; zookeeper connection closed. 2024-12-04T21:51:54,926 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@260742b5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@260742b5 2024-12-04T21:51:54,926 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T21:51:54,929 INFO [M:0;bb3046a53f79:33977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/254234665d914363b0719a532607a7f8, entries=8, sequenceid=121, filesize=5.5 K 2024-12-04T21:51:54,930 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a382e8c68e5450fa30bce1376c2d607 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3a382e8c68e5450fa30bce1376c2d607 2024-12-04T21:51:54,935 INFO [M:0;bb3046a53f79:33977 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3a382e8c68e5450fa30bce1376c2d607 2024-12-04T21:51:54,935 INFO [M:0;bb3046a53f79:33977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3a382e8c68e5450fa30bce1376c2d607, entries=14, sequenceid=121, filesize=7.6 K 2024-12-04T21:51:54,936 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/714972340e8348ceb7ec06d130da4c30 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/714972340e8348ceb7ec06d130da4c30 2024-12-04T21:51:54,941 INFO [M:0;bb3046a53f79:33977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/714972340e8348ceb7ec06d130da4c30, entries=1, sequenceid=121, filesize=5.0 K 2024-12-04T21:51:54,942 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c1b887e4b5f54cd3bccffaedf12ff7d0 as hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c1b887e4b5f54cd3bccffaedf12ff7d0 2024-12-04T21:51:54,947 INFO [M:0;bb3046a53f79:33977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38849/user/jenkins/test-data/db49b8e8-4704-4608-5768-3fb0b600f7bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c1b887e4b5f54cd3bccffaedf12ff7d0, entries=1, sequenceid=121, filesize=4.9 K 
2024-12-04T21:51:54,948 INFO [M:0;bb3046a53f79:33977 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=121, compaction requested=false 2024-12-04T21:51:54,949 INFO [M:0;bb3046a53f79:33977 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:54,949 DEBUG [M:0;bb3046a53f79:33977 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733349114835Disabling compacts and flushes for region at 1733349114835Disabling writes for close at 1733349114835Obtaining lock to block concurrent updates at 1733349114835Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733349114835Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44659, getHeapSize=56272, getOffHeapSize=0, getCellsCount=140 at 1733349114835Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733349114836 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733349114836Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733349114847 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733349114848 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733349114857 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733349114870 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733349114870Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733349114880 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733349114891 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733349114891Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733349114901 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733349114913 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733349114913Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ce27256: reopening flushed file at 1733349114922 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cc9f06c: reopening flushed file at 1733349114929 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55457a0a: reopening flushed file at 1733349114935 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cc5c992: reopening flushed file at 1733349114941 (+6 ms)Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=121, compaction requested=false at 1733349114948 (+7 ms)Writing region close event to WAL at 1733349114949 (+1 ms)Closed at 1733349114949 2024-12-04T21:51:54,950 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,950 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,950 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,950 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,950 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:51:54,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46585 is added to 
blk_1073741830_1006 (size=53056) 2024-12-04T21:51:54,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40085 is added to blk_1073741830_1006 (size=53056) 2024-12-04T21:51:54,952 INFO [M:0;bb3046a53f79:33977 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T21:51:54,952 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-04T21:51:54,952 INFO [M:0;bb3046a53f79:33977 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33977 2024-12-04T21:51:54,953 INFO [M:0;bb3046a53f79:33977 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:51:54,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:54,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:55,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:51:55,054 INFO [M:0;bb3046a53f79:33977 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:51:55,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33977-0x100a73716ff0000, quorum=127.0.0.1:54713, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:51:55,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@594a0329{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:51:55,058 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fc8e768{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:51:55,059 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:51:55,059 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68282ab6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:51:55,059 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76a9ae4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.log.dir/,STOPPED} 2024-12-04T21:51:55,062 WARN [BP-589344384-172.17.0.2-1733349062783 heartbeating to 
localhost/127.0.0.1:38849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:51:55,062 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-04T21:51:55,062 WARN [BP-589344384-172.17.0.2-1733349062783 heartbeating to localhost/127.0.0.1:38849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-589344384-172.17.0.2-1733349062783 (Datanode Uuid ddae31d1-22d6-4563-8ca0-e1244be38825) service to localhost/127.0.0.1:38849 2024-12-04T21:51:55,062 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:51:55,063 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/data/data3/current/BP-589344384-172.17.0.2-1733349062783 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:51:55,063 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/data/data4/current/BP-589344384-172.17.0.2-1733349062783 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:51:55,064 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:51:55,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@28e075ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:51:55,067 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f0c0d1b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:51:55,067 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:51:55,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7cb8a90e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:51:55,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1cb73ec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.log.dir/,STOPPED} 2024-12-04T21:51:55,068 WARN [BP-589344384-172.17.0.2-1733349062783 heartbeating to localhost/127.0.0.1:38849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:51:55,068 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:51:55,068 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:51:55,068 WARN [BP-589344384-172.17.0.2-1733349062783 heartbeating to localhost/127.0.0.1:38849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-589344384-172.17.0.2-1733349062783 (Datanode Uuid e8fc9aff-6b1a-44e9-bdf1-233e32b122b8) service to localhost/127.0.0.1:38849 2024-12-04T21:51:55,069 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/data/data1/current/BP-589344384-172.17.0.2-1733349062783 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:51:55,069 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/cluster_cc63017e-026a-1393-21bf-5e2898ef4164/data/data2/current/BP-589344384-172.17.0.2-1733349062783 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:51:55,069 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:51:55,075 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3db8de80{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:51:55,075 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f36b22{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:51:55,075 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:51:55,075 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28ceda60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:51:55,075 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b7eaed2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.log.dir/,STOPPED} 2024-12-04T21:51:55,081 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T21:51:55,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T21:51:55,105 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 182) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38849 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38849 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38849 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:38849 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38849 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38849 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38849 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38849 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38849 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=88 (was 99), ProcessCount=11 (was 11), AvailableMemoryMB=2155 (was 2278) 2024-12-04T21:51:55,112 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=88, ProcessCount=11, AvailableMemoryMB=2155 2024-12-04T21:51:55,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T21:51:55,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.log.dir so I do NOT create it in target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae 2024-12-04T21:51:55,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0ca98209-304e-864a-9bf0-7c20280cb71d/hadoop.tmp.dir so I do NOT create it in target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae 2024-12-04T21:51:55,113 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85, deleteOnExit=true 2024-12-04T21:51:55,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T21:51:55,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/test.cache.data in system properties and HBase conf 2024-12-04T21:51:55,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T21:51:55,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.log.dir in system properties and HBase conf 2024-12-04T21:51:55,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T21:51:55,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/mapreduce.cluster.temp.dir in system properties 
and HBase conf 2024-12-04T21:51:55,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T21:51:55,113 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-04T21:51:55,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/nfs.dump.dir in system 
properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/java.io.tmpdir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T21:51:55,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T21:51:55,126 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:51:55,164 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:51:55,167 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:51:55,168 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:51:55,168 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:51:55,168 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:51:55,169 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:51:55,169 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16f3519b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:51:55,169 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@367e9406{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:51:55,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4aca7f10{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/java.io.tmpdir/jetty-localhost-45033-hadoop-hdfs-3_4_1-tests_jar-_-any-18275518548825820048/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:51:55,262 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6deead78{HTTP/1.1, (http/1.1)}{localhost:45033} 2024-12-04T21:51:55,262 INFO [Time-limited test {}] server.Server(415): Started @236192ms 2024-12-04T21:51:55,273 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:51:55,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:51:55,353 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:51:55,354 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:51:55,354 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:51:55,354 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:51:55,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d5d7e10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:51:55,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46455419{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:51:55,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:55,450 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5741d7ee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/java.io.tmpdir/jetty-localhost-32911-hadoop-hdfs-3_4_1-tests_jar-_-any-5832554073751764660/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:51:55,450 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d2ea6ab{HTTP/1.1, (http/1.1)}{localhost:32911} 2024-12-04T21:51:55,450 INFO [Time-limited test {}] server.Server(415): Started @236380ms 2024-12-04T21:51:55,451 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:51:55,474 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:51:55,476 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:51:55,477 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:51:55,477 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:51:55,477 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:51:55,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67cee3f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:51:55,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@de978e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:51:55,502 WARN [Thread-1955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/data/data2/current/BP-42441024-172.17.0.2-1733349115130/current, will proceed with Du for space computation calculation, 2024-12-04T21:51:55,502 WARN [Thread-1954 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/data/data1/current/BP-42441024-172.17.0.2-1733349115130/current, will proceed with Du for space computation calculation, 2024-12-04T21:51:55,521 WARN [Thread-1933 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:51:55,523 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x95eb524b193396be with lease ID 0x8c0f60df288dff90: Processing first storage report for DS-3ce86709-0444-4902-af45-5bd5ffd17bcd from datanode DatanodeRegistration(127.0.0.1:38497, datanodeUuid=890c857c-9d13-43e5-993d-2d277d675b37, infoPort=33067, infoSecurePort=0, ipcPort=38041, storageInfo=lv=-57;cid=testClusterID;nsid=471451399;c=1733349115130) 2024-12-04T21:51:55,523 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95eb524b193396be with lease ID 0x8c0f60df288dff90: from storage DS-3ce86709-0444-4902-af45-5bd5ffd17bcd node DatanodeRegistration(127.0.0.1:38497, datanodeUuid=890c857c-9d13-43e5-993d-2d277d675b37, infoPort=33067, infoSecurePort=0, ipcPort=38041, storageInfo=lv=-57;cid=testClusterID;nsid=471451399;c=1733349115130), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:51:55,523 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x95eb524b193396be with lease ID 0x8c0f60df288dff90: Processing first storage report for DS-26a6a96d-5f56-4700-a2b6-3853f6b81491 from datanode DatanodeRegistration(127.0.0.1:38497, datanodeUuid=890c857c-9d13-43e5-993d-2d277d675b37, infoPort=33067, infoSecurePort=0, ipcPort=38041, storageInfo=lv=-57;cid=testClusterID;nsid=471451399;c=1733349115130) 2024-12-04T21:51:55,523 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95eb524b193396be with lease ID 0x8c0f60df288dff90: from storage DS-26a6a96d-5f56-4700-a2b6-3853f6b81491 node DatanodeRegistration(127.0.0.1:38497, datanodeUuid=890c857c-9d13-43e5-993d-2d277d675b37, infoPort=33067, infoSecurePort=0, ipcPort=38041, storageInfo=lv=-57;cid=testClusterID;nsid=471451399;c=1733349115130), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:51:55,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d113b0b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/java.io.tmpdir/jetty-localhost-42763-hadoop-hdfs-3_4_1-tests_jar-_-any-17136102033838808754/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:51:55,572 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55cf2d5e{HTTP/1.1, (http/1.1)}{localhost:42763} 2024-12-04T21:51:55,572 INFO [Time-limited test {}] server.Server(415): Started @236502ms 2024-12-04T21:51:55,573 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-04T21:51:55,625 INFO [regionserver/bb3046a53f79:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:51:55,625 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/data/data3/current/BP-42441024-172.17.0.2-1733349115130/current, will proceed with Du for space computation calculation, 2024-12-04T21:51:55,625 WARN [Thread-1981 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/data/data4/current/BP-42441024-172.17.0.2-1733349115130/current, will proceed with Du for space computation calculation, 2024-12-04T21:51:55,642 WARN [Thread-1969 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:51:55,644 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf37e07bb42e43bb with lease ID 0x8c0f60df288dff91: Processing first storage report for DS-4349b4f0-c535-4e17-971c-2f4276b0e702 from datanode DatanodeRegistration(127.0.0.1:36595, datanodeUuid=442bddc6-ad31-4d5d-9f07-1a08e8561321, infoPort=37687, infoSecurePort=0, ipcPort=39763, storageInfo=lv=-57;cid=testClusterID;nsid=471451399;c=1733349115130) 2024-12-04T21:51:55,644 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf37e07bb42e43bb with lease ID 0x8c0f60df288dff91: from storage DS-4349b4f0-c535-4e17-971c-2f4276b0e702 node DatanodeRegistration(127.0.0.1:36595, datanodeUuid=442bddc6-ad31-4d5d-9f07-1a08e8561321, infoPort=37687, infoSecurePort=0, ipcPort=39763, storageInfo=lv=-57;cid=testClusterID;nsid=471451399;c=1733349115130), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-04T21:51:55,644 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf37e07bb42e43bb with lease ID 0x8c0f60df288dff91: Processing first storage report for DS-95f78568-db41-471a-8439-b0d68e016e32 from datanode DatanodeRegistration(127.0.0.1:36595, datanodeUuid=442bddc6-ad31-4d5d-9f07-1a08e8561321, infoPort=37687, infoSecurePort=0, ipcPort=39763, storageInfo=lv=-57;cid=testClusterID;nsid=471451399;c=1733349115130) 2024-12-04T21:51:55,644 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf37e07bb42e43bb with lease ID 0x8c0f60df288dff91: from storage DS-95f78568-db41-471a-8439-b0d68e016e32 node DatanodeRegistration(127.0.0.1:36595, datanodeUuid=442bddc6-ad31-4d5d-9f07-1a08e8561321, infoPort=37687, infoSecurePort=0, ipcPort=39763, storageInfo=lv=-57;cid=testClusterID;nsid=471451399;c=1733349115130), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:51:55,693 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae 2024-12-04T21:51:55,700 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/zookeeper_0, clientPort=57789, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T21:51:55,701 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57789 2024-12-04T21:51:55,702 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:55,704 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:55,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:51:55,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:51:55,715 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362 with version=8 2024-12-04T21:51:55,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/hbase-staging 2024-12-04T21:51:55,717 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:51:55,717 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:55,717 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:55,717 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:51:55,717 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:55,717 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:51:55,717 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, 
hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T21:51:55,717 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:51:55,718 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45219 2024-12-04T21:51:55,719 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45219 connecting to ZooKeeper ensemble=127.0.0.1:57789 2024-12-04T21:51:55,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452190x0, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:51:55,723 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45219-0x100a737e39f0000 connected 2024-12-04T21:51:55,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:55,741 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:55,743 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:51:55,743 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362, hbase.cluster.distributed=false 2024-12-04T21:51:55,745 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:51:55,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45219 2024-12-04T21:51:55,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45219 2024-12-04T21:51:55,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45219 2024-12-04T21:51:55,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45219 2024-12-04T21:51:55,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45219 2024-12-04T21:51:55,759 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:51:55,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:55,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:55,759 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
2024-12-04T21:51:55,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:51:55,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:51:55,759 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T21:51:55,759 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:51:55,760 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46213 2024-12-04T21:51:55,761 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46213 connecting to ZooKeeper ensemble=127.0.0.1:57789 2024-12-04T21:51:55,761 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:55,762 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:55,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462130x0, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:51:55,766 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:51:55,766 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46213-0x100a737e39f0001 connected 2024-12-04T21:51:55,766 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T21:51:55,766 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T21:51:55,767 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T21:51:55,768 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:51:55,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46213 2024-12-04T21:51:55,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46213 2024-12-04T21:51:55,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46213 2024-12-04T21:51:55,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46213 
2024-12-04T21:51:55,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46213 2024-12-04T21:51:55,779 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bb3046a53f79:45219 2024-12-04T21:51:55,780 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bb3046a53f79,45219,1733349115717 2024-12-04T21:51:55,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:51:55,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:51:55,781 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bb3046a53f79,45219,1733349115717 2024-12-04T21:51:55,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T21:51:55,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,782 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T21:51:55,783 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bb3046a53f79,45219,1733349115717 from backup master directory 2024-12-04T21:51:55,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bb3046a53f79,45219,1733349115717 2024-12-04T21:51:55,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:51:55,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:51:55,783 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T21:51:55,783 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bb3046a53f79,45219,1733349115717 2024-12-04T21:51:55,787 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/hbase.id] with ID: c5b83e5a-2f51-46fb-b86d-1e83638a3625 2024-12-04T21:51:55,787 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/.tmp/hbase.id 2024-12-04T21:51:55,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:51:55,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:51:55,792 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/.tmp/hbase.id]:[hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/hbase.id] 2024-12-04T21:51:55,802 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:55,802 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T21:51:55,803 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-04T21:51:55,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:51:55,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:51:55,812 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:51:55,813 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T21:51:55,813 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:51:55,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:51:55,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:51:55,820 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store 2024-12-04T21:51:55,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:51:55,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:51:55,827 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:55,827 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:51:55,827 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:55,827 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:55,827 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:51:55,827 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:51:55,827 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-04T21:51:55,827 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733349115827Disabling compacts and flushes for region at 1733349115827Disabling writes for close at 1733349115827Writing region close event to WAL at 1733349115827Closed at 1733349115827 2024-12-04T21:51:55,828 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/.initializing 2024-12-04T21:51:55,828 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/WALs/bb3046a53f79,45219,1733349115717 2024-12-04T21:51:55,830 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C45219%2C1733349115717, suffix=, logDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/WALs/bb3046a53f79,45219,1733349115717, archiveDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/oldWALs, maxLogs=10 2024-12-04T21:51:55,830 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C45219%2C1733349115717.1733349115830 2024-12-04T21:51:55,834 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/WALs/bb3046a53f79,45219,1733349115717/bb3046a53f79%2C45219%2C1733349115717.1733349115830 2024-12-04T21:51:55,834 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:33067:33067)] 2024-12-04T21:51:55,835 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:51:55,835 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:55,835 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,835 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,836 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T21:51:55,837 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:55,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:55,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T21:51:55,839 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:55,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:51:55,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T21:51:55,840 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:55,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:51:55,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,841 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T21:51:55,841 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:55,842 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:51:55,842 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,843 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,843 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,844 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,844 DEBUG [master/bb3046a53f79:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,845 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T21:51:55,846 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:51:55,848 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:51:55,848 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815094, jitterRate=0.03644648194313049}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T21:51:55,849 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733349115835Initializing all the Stores at 1733349115836 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349115836Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349115836Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349115836Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349115836Cleaning up temporary data from old regions at 1733349115844 (+8 ms)Region opened successfully at 1733349115849 (+5 ms) 2024-12-04T21:51:55,849 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T21:51:55,852 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e6b3f3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:51:55,853 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T21:51:55,853 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T21:51:55,853 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T21:51:55,853 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T21:51:55,853 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T21:51:55,854 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T21:51:55,854 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T21:51:55,858 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T21:51:55,859 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T21:51:55,860 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T21:51:55,860 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T21:51:55,861 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T21:51:55,861 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T21:51:55,861 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T21:51:55,862 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T21:51:55,863 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T21:51:55,864 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T21:51:55,864 DEBUG 
[master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T21:51:55,866 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T21:51:55,866 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T21:51:55,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:51:55,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:51:55,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,868 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bb3046a53f79,45219,1733349115717, sessionid=0x100a737e39f0000, setting cluster-up flag (Was=false) 2024-12-04T21:51:55,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,872 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T21:51:55,872 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,45219,1733349115717 2024-12-04T21:51:55,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:55,876 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T21:51:55,877 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,45219,1733349115717 2024-12-04T21:51:55,878 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T21:51:55,879 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T21:51:55,880 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T21:51:55,880 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T21:51:55,880 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bb3046a53f79,45219,1733349115717 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T21:51:55,881 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:51:55,881 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:51:55,881 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:51:55,881 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:51:55,881 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bb3046a53f79:0, corePoolSize=10, maxPoolSize=10 2024-12-04T21:51:55,881 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:55,881 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:51:55,881 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T21:51:55,885 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:51:55,885 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T21:51:55,886 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:55,886 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T21:51:55,889 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733349145889 2024-12-04T21:51:55,889 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T21:51:55,889 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T21:51:55,890 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T21:51:55,890 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T21:51:55,890 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T21:51:55,890 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T21:51:55,890 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:55,890 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T21:51:55,890 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T21:51:55,890 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T21:51:55,891 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T21:51:55,891 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T21:51:55,893 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349115891,5,FailOnTimeoutGroup] 2024-12-04T21:51:55,893 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349115893,5,FailOnTimeoutGroup] 2024-12-04T21:51:55,893 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:55,893 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T21:51:55,893 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:55,893 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-04T21:51:55,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:51:55,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:51:55,896 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T21:51:55,897 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362 2024-12-04T21:51:55,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:51:55,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:51:55,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:55,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:51:55,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:51:55,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:55,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:55,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:51:55,910 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:51:55,910 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:55,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:55,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:51:55,912 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:51:55,912 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:55,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:55,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:51:55,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:51:55,913 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:55,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:55,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:51:55,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740 2024-12-04T21:51:55,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740 2024-12-04T21:51:55,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:51:55,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:51:55,916 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-04T21:51:55,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:51:55,918 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:51:55,919 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787961, jitterRate=0.0019453763961791992}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:51:55,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733349115906Initializing all the Stores at 1733349115907 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349115907Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349115907Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349115907Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349115907Cleaning up temporary data from old regions at 1733349115915 (+8 ms)Region opened successfully at 1733349115919 (+4 ms) 2024-12-04T21:51:55,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:51:55,919 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:51:55,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:51:55,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:51:55,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:51:55,920 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:51:55,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733349115919Disabling compacts and flushes for region at 1733349115919Disabling writes for close at 1733349115919Writing region 
close event to WAL at 1733349115920 (+1 ms)Closed at 1733349115920 2024-12-04T21:51:55,921 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:51:55,921 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T21:51:55,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T21:51:55,922 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:51:55,923 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T21:51:55,972 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(746): ClusterId : c5b83e5a-2f51-46fb-b86d-1e83638a3625 2024-12-04T21:51:55,972 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T21:51:55,976 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T21:51:55,976 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T21:51:55,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:55,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:55,979 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T21:51:55,980 DEBUG [RS:0;bb3046a53f79:46213 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d3023a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:51:55,995 DEBUG [RS:0;bb3046a53f79:46213 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bb3046a53f79:46213 2024-12-04T21:51:55,995 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T21:51:55,995 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T21:51:55,995 DEBUG [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-04T21:51:55,996 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(2659): reportForDuty to master=bb3046a53f79,45219,1733349115717 with port=46213, startcode=1733349115759 2024-12-04T21:51:55,996 DEBUG [RS:0;bb3046a53f79:46213 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T21:51:55,998 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42643, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T21:51:55,998 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45219 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bb3046a53f79,46213,1733349115759 2024-12-04T21:51:55,998 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45219 {}] master.ServerManager(517): Registering regionserver=bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,000 DEBUG [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362 2024-12-04T21:51:56,000 DEBUG [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36245 2024-12-04T21:51:56,000 DEBUG [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T21:51:56,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:51:56,001 DEBUG [RS:0;bb3046a53f79:46213 {}] zookeeper.ZKUtil(111): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,001 WARN [RS:0;bb3046a53f79:46213 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-04T21:51:56,001 INFO [RS:0;bb3046a53f79:46213 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:51:56,001 DEBUG [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,002 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bb3046a53f79,46213,1733349115759] 2024-12-04T21:51:56,004 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T21:51:56,006 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T21:51:56,006 INFO [RS:0;bb3046a53f79:46213 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T21:51:56,006 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,006 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T21:51:56,007 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T21:51:56,007 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bb3046a53f79:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:51:56,007 DEBUG [RS:0;bb3046a53f79:46213 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:51:56,008 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,008 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,008 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,008 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,008 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,008 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46213,1733349115759-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:51:56,019 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T21:51:56,020 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,46213,1733349115759-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,020 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,020 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.Replication(171): bb3046a53f79,46213,1733349115759 started 2024-12-04T21:51:56,031 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:51:56,031 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(1482): Serving as bb3046a53f79,46213,1733349115759, RpcServer on bb3046a53f79/172.17.0.2:46213, sessionid=0x100a737e39f0001 2024-12-04T21:51:56,031 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T21:51:56,031 DEBUG [RS:0;bb3046a53f79:46213 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,031 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,46213,1733349115759' 2024-12-04T21:51:56,031 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T21:51:56,031 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T21:51:56,032 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T21:51:56,032 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T21:51:56,032 DEBUG [RS:0;bb3046a53f79:46213 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,032 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,46213,1733349115759' 2024-12-04T21:51:56,032 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T21:51:56,032 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T21:51:56,032 DEBUG [RS:0;bb3046a53f79:46213 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T21:51:56,032 INFO [RS:0;bb3046a53f79:46213 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T21:51:56,032 INFO [RS:0;bb3046a53f79:46213 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T21:51:56,074 WARN [bb3046a53f79:45219 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-04T21:51:56,136 INFO [RS:0;bb3046a53f79:46213 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C46213%2C1733349115759, suffix=, logDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759, archiveDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/oldWALs, maxLogs=32 2024-12-04T21:51:56,137 INFO [RS:0;bb3046a53f79:46213 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46213%2C1733349115759.1733349116136 2024-12-04T21:51:56,146 INFO [RS:0;bb3046a53f79:46213 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.1733349116136 2024-12-04T21:51:56,148 DEBUG [RS:0;bb3046a53f79:46213 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33067:33067),(127.0.0.1/127.0.0.1:37687:37687)] 2024-12-04T21:51:56,324 DEBUG [bb3046a53f79:45219 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T21:51:56,325 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,329 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,46213,1733349115759, state=OPENING 2024-12-04T21:51:56,331 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T21:51:56,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:56,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:51:56,335 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:51:56,335 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:51:56,335 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:51:56,335 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,46213,1733349115759}] 2024-12-04T21:51:56,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:51:56,488 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T21:51:56,490 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37289, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T21:51:56,495 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T21:51:56,495 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:51:56,498 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C46213%2C1733349115759.meta, suffix=.meta, logDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759, archiveDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/oldWALs, maxLogs=32 2024-12-04T21:51:56,499 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46213%2C1733349115759.meta.1733349116498.meta 2024-12-04T21:51:56,504 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.meta.1733349116498.meta 2024-12-04T21:51:56,509 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:33067:33067)] 2024-12-04T21:51:56,512 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:51:56,512 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T21:51:56,512 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T21:51:56,512 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-04T21:51:56,512 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T21:51:56,512 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:56,512 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T21:51:56,512 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T21:51:56,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:51:56,514 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:51:56,514 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:56,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:56,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:51:56,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:51:56,515 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:56,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:56,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:51:56,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:51:56,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:56,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:51:56,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:51:56,517 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:51:56,517 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:56,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-04T21:51:56,518 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:51:56,518 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740 2024-12-04T21:51:56,519 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740 2024-12-04T21:51:56,520 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:51:56,520 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:51:56,520 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T21:51:56,521 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:51:56,522 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737431, jitterRate=-0.06230853497982025}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:51:56,522 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T21:51:56,523 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733349116512Writing region info on filesystem at 1733349116512Initializing all the Stores at 1733349116513 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349116513Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349116513Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349116513Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349116513Cleaning up temporary data from old regions at 1733349116520 (+7 ms)Running coprocessor post-open hooks at 1733349116522 (+2 ms)Region opened successfully at 1733349116523 (+1 ms) 2024-12-04T21:51:56,524 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733349116488 2024-12-04T21:51:56,525 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T21:51:56,526 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T21:51:56,526 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,527 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,46213,1733349115759, state=OPEN 2024-12-04T21:51:56,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:51:56,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:51:56,529 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,529 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:51:56,529 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:51:56,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T21:51:56,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,46213,1733349115759 in 194 msec 2024-12-04T21:51:56,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T21:51:56,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 610 msec 2024-12-04T21:51:56,533 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:51:56,533 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T21:51:56,535 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:51:56,535 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,46213,1733349115759, seqNum=-1] 2024-12-04T21:51:56,535 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:51:56,536 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36005, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:51:56,541 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 661 msec 2024-12-04T21:51:56,541 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733349116541, completionTime=-1 2024-12-04T21:51:56,541 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T21:51:56,541 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T21:51:56,542 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T21:51:56,542 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733349176542 2024-12-04T21:51:56,542 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733349236542 2024-12-04T21:51:56,542 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-04T21:51:56,543 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,45219,1733349115717-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,543 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,45219,1733349115717-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,543 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,45219,1733349115717-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,543 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bb3046a53f79:45219, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:51:56,543 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,543 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T21:51:56,544 DEBUG [master/bb3046a53f79:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T21:51:56,546 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.763sec 2024-12-04T21:51:56,547 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T21:51:56,547 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T21:51:56,547 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T21:51:56,547 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T21:51:56,547 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T21:51:56,547 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,45219,1733349115717-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:51:56,547 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,45219,1733349115717-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T21:51:56,549 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T21:51:56,549 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T21:51:56,550 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,45219,1733349115717-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-04T21:51:56,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c0fa0ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:51:56,572 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bb3046a53f79,45219,-1 for getting cluster id 2024-12-04T21:51:56,572 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T21:51:56,574 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c5b83e5a-2f51-46fb-b86d-1e83638a3625' 2024-12-04T21:51:56,574 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T21:51:56,574 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c5b83e5a-2f51-46fb-b86d-1e83638a3625" 2024-12-04T21:51:56,575 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605d3a34, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:51:56,575 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bb3046a53f79,45219,-1] 2024-12-04T21:51:56,575 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T21:51:56,575 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:51:56,577 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47596, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T21:51:56,578 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79aff83f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:51:56,578 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:51:56,580 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,46213,1733349115759, seqNum=-1] 2024-12-04T21:51:56,580 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:51:56,581 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58462, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:51:56,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bb3046a53f79,45219,1733349115717 2024-12-04T21:51:56,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:51:56,586 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T21:51:56,587 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-04T21:51:56,588 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is bb3046a53f79,45219,1733349115717 2024-12-04T21:51:56,588 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@14a7d87a 2024-12-04T21:51:56,588 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-04T21:51:56,589 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47610, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-04T21:51:56,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45219 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-04T21:51:56,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45219 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-04T21:51:56,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45219 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:51:56,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45219 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-04T21:51:56,593 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-04T21:51:56,593 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:56,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45219 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-04T21:51:56,595 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-04T21:51:56,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:51:56,602 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741835_1011 (size=381) 2024-12-04T21:51:56,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741835_1011 (size=381) 2024-12-04T21:51:56,604 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4185395759ca66fa95c987c9f7e36030, NAME => 'TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362 2024-12-04T21:51:56,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741836_1012 (size=64) 2024-12-04T21:51:56,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741836_1012 (size=64) 2024-12-04T21:51:56,609 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:56,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 4185395759ca66fa95c987c9f7e36030, disabling compactions & flushes 2024-12-04T21:51:56,610 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:51:56,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:51:56,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. after waiting 0 ms 2024-12-04T21:51:56,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:51:56,610 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 
2024-12-04T21:51:56,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4185395759ca66fa95c987c9f7e36030: Waiting for close lock at 1733349116610Disabling compacts and flushes for region at 1733349116610Disabling writes for close at 1733349116610Writing region close event to WAL at 1733349116610Closed at 1733349116610 2024-12-04T21:51:56,611 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-04T21:51:56,611 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733349116611"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733349116611"}]},"ts":"1733349116611"} 2024-12-04T21:51:56,613 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-04T21:51:56,614 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-04T21:51:56,614 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733349116614"}]},"ts":"1733349116614"} 2024-12-04T21:51:56,616 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-04T21:51:56,616 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4185395759ca66fa95c987c9f7e36030, ASSIGN}] 2024-12-04T21:51:56,618 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4185395759ca66fa95c987c9f7e36030, ASSIGN 2024-12-04T21:51:56,619 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4185395759ca66fa95c987c9f7e36030, ASSIGN; state=OFFLINE, location=bb3046a53f79,46213,1733349115759; forceNewPlan=false, retain=false 2024-12-04T21:51:56,770 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4185395759ca66fa95c987c9f7e36030, regionState=OPENING, regionLocation=bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4185395759ca66fa95c987c9f7e36030, ASSIGN because future has completed 2024-12-04T21:51:56,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4185395759ca66fa95c987c9f7e36030, 
server=bb3046a53f79,46213,1733349115759}] 2024-12-04T21:51:56,941 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:51:56,941 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4185395759ca66fa95c987c9f7e36030, NAME => 'TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:51:56,942 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,942 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:51:56,942 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,943 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,945 INFO [StoreOpener-4185395759ca66fa95c987c9f7e36030-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,946 INFO [StoreOpener-4185395759ca66fa95c987c9f7e36030-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4185395759ca66fa95c987c9f7e36030 columnFamilyName info 2024-12-04T21:51:56,946 DEBUG [StoreOpener-4185395759ca66fa95c987c9f7e36030-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:51:56,947 INFO [StoreOpener-4185395759ca66fa95c987c9f7e36030-1 {}] regionserver.HStore(327): Store=4185395759ca66fa95c987c9f7e36030/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:51:56,947 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,947 DEBUG 
[RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,948 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,948 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,948 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,950 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,952 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:51:56,952 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4185395759ca66fa95c987c9f7e36030; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=825950, jitterRate=0.0502503365278244}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T21:51:56,952 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:51:56,953 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4185395759ca66fa95c987c9f7e36030: Running coprocessor pre-open hook at 1733349116943Writing region info on filesystem at 1733349116943Initializing all the Stores at 1733349116944 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349116945 (+1 ms)Cleaning up temporary data from old regions at 1733349116948 (+3 ms)Running coprocessor post-open hooks at 1733349116952 (+4 ms)Region opened successfully at 1733349116953 (+1 ms) 2024-12-04T21:51:56,954 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., pid=6, masterSystemTime=1733349116933 2024-12-04T21:51:56,956 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, 
pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:51:56,956 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:51:56,956 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4185395759ca66fa95c987c9f7e36030, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,46213,1733349115759 2024-12-04T21:51:56,958 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4185395759ca66fa95c987c9f7e36030, server=bb3046a53f79,46213,1733349115759 because future has completed 2024-12-04T21:51:56,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-04T21:51:56,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4185395759ca66fa95c987c9f7e36030, server=bb3046a53f79,46213,1733349115759 in 183 msec 2024-12-04T21:51:56,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-04T21:51:56,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4185395759ca66fa95c987c9f7e36030, ASSIGN in 346 msec 2024-12-04T21:51:56,965 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-04T21:51:56,965 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733349116965"}]},"ts":"1733349116965"} 2024-12-04T21:51:56,967 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-04T21:51:56,969 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-04T21:51:56,971 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 379 msec 2024-12-04T21:51:56,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:56,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:57,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:57,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:57,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:58,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:58,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:58,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:59,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:59,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,666 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:51:59,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:51:59,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:00,171 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T21:52:00,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:00,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:52:00,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:00,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:01,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:52:01,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:01,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:02,005 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-04T21:52:02,006 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-04T21:52:02,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:02,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:02,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:03,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:03,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:03,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:04,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:04,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T21:52:04,832 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-04T21:52:04,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:52:04,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-04T21:52:04,835 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-04T21:52:04,835 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-04T21:52:04,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:04,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:05,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:05,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:05,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:06,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:06,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45219 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-04T21:52:06,614 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-04T21:52:06,614 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-04T21:52:06,621 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-04T21:52:06,621 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 
2024-12-04T21:52:06,624 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., hostname=bb3046a53f79,46213,1733349115759, seqNum=2] 2024-12-04T21:52:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:06,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4185395759ca66fa95c987c9f7e36030 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T21:52:06,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/514ad2b7cf1946fea38e375a4ded3694 is 1080, key is row0001/info:/1733349126626/Put/seqid=0 2024-12-04T21:52:06,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741837_1013 (size=12509) 2024-12-04T21:52:06,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741837_1013 (size=12509) 2024-12-04T21:52:06,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/514ad2b7cf1946fea38e375a4ded3694 2024-12-04T21:52:06,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/514ad2b7cf1946fea38e375a4ded3694 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/514ad2b7cf1946fea38e375a4ded3694 2024-12-04T21:52:06,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/514ad2b7cf1946fea38e375a4ded3694, entries=7, sequenceid=11, filesize=12.2 K 2024-12-04T21:52:06,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 4185395759ca66fa95c987c9f7e36030 in 36ms, sequenceid=11, compaction requested=false 2024-12-04T21:52:06,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:06,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:06,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4185395759ca66fa95c987c9f7e36030 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-12-04T21:52:06,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/98f26eb26be94afcb8f8c87bb4fab216 is 1080, key is row0008/info:/1733349126645/Put/seqid=0 2024-12-04T21:52:06,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741838_1014 (size=26530) 2024-12-04T21:52:06,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741838_1014 (size=26530) 2024-12-04T21:52:06,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/98f26eb26be94afcb8f8c87bb4fab216 2024-12-04T21:52:06,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/98f26eb26be94afcb8f8c87bb4fab216 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/98f26eb26be94afcb8f8c87bb4fab216 2024-12-04T21:52:06,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/98f26eb26be94afcb8f8c87bb4fab216, entries=20, sequenceid=34, filesize=25.9 K 2024-12-04T21:52:06,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=5.25 KB/5380 for 4185395759ca66fa95c987c9f7e36030 in 22ms, sequenceid=34, compaction requested=false 2024-12-04T21:52:06,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:06,704 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=38.1 K, sizeToCheck=16.0 K 2024-12-04T21:52:06,704 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:06,704 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/98f26eb26be94afcb8f8c87bb4fab216 because midkey is the same as first or last row 2024-12-04T21:52:06,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:06,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:07,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:07,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:07,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:08,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:52:08,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:08,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4185395759ca66fa95c987c9f7e36030 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T21:52:08,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/764d02bd376d493585246d69e5735de3 is 1080, key is row0028/info:/1733349126683/Put/seqid=0 2024-12-04T21:52:08,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741839_1015 (size=12509) 2024-12-04T21:52:08,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741839_1015 (size=12509) 2024-12-04T21:52:08,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/764d02bd376d493585246d69e5735de3 2024-12-04T21:52:08,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/764d02bd376d493585246d69e5735de3 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/764d02bd376d493585246d69e5735de3 2024-12-04T21:52:08,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/764d02bd376d493585246d69e5735de3, entries=7, sequenceid=44, filesize=12.2 K 2024-12-04T21:52:08,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 4185395759ca66fa95c987c9f7e36030 in 23ms, sequenceid=44, compaction requested=true 2024-12-04T21:52:08,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:08,727 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=50.3 K, sizeToCheck=16.0 K 2024-12-04T21:52:08,727 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:08,727 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/98f26eb26be94afcb8f8c87bb4fab216 because midkey is the same as first or last row 2024-12-04T21:52:08,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4185395759ca66fa95c987c9f7e36030:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-04T21:52:08,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:08,728 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:52:08,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:08,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4185395759ca66fa95c987c9f7e36030 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-04T21:52:08,729 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 51548 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T21:52:08,729 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): 4185395759ca66fa95c987c9f7e36030/info is initiating minor compaction (all files) 2024-12-04T21:52:08,729 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4185395759ca66fa95c987c9f7e36030/info in TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:52:08,729 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/514ad2b7cf1946fea38e375a4ded3694, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/98f26eb26be94afcb8f8c87bb4fab216, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/764d02bd376d493585246d69e5735de3] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp, totalSize=50.3 K 2024-12-04T21:52:08,729 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 514ad2b7cf1946fea38e375a4ded3694, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733349126626 2024-12-04T21:52:08,730 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 98f26eb26be94afcb8f8c87bb4fab216, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733349126645 2024-12-04T21:52:08,730 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 764d02bd376d493585246d69e5735de3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1733349126683 2024-12-04T21:52:08,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/8ccb27d78a6b4fb18ab7d5a205816cec is 1080, key is row0035/info:/1733349128705/Put/seqid=0 
2024-12-04T21:52:08,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741840_1016 (size=18987) 2024-12-04T21:52:08,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741840_1016 (size=18987) 2024-12-04T21:52:08,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/8ccb27d78a6b4fb18ab7d5a205816cec 2024-12-04T21:52:08,742 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4185395759ca66fa95c987c9f7e36030#info#compaction#58 average throughput is 34.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:52:08,743 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/c5653b30399f4d369151f9be5a87c9a2 is 1080, key is row0001/info:/1733349126626/Put/seqid=0 2024-12-04T21:52:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/8ccb27d78a6b4fb18ab7d5a205816cec as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/8ccb27d78a6b4fb18ab7d5a205816cec 2024-12-04T21:52:08,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741841_1017 (size=41747) 2024-12-04T21:52:08,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741841_1017 (size=41747) 2024-12-04T21:52:08,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/8ccb27d78a6b4fb18ab7d5a205816cec, entries=13, sequenceid=60, filesize=18.5 K 2024-12-04T21:52:08,753 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/c5653b30399f4d369151f9be5a87c9a2 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/c5653b30399f4d369151f9be5a87c9a2 2024-12-04T21:52:08,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for 4185395759ca66fa95c987c9f7e36030 in 24ms, sequenceid=60, compaction requested=false 2024-12-04T21:52:08,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:08,753 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.9 K, sizeToCheck=16.0 K 2024-12-04T21:52:08,753 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:08,753 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/98f26eb26be94afcb8f8c87bb4fab216 because midkey is the same as first or last row 2024-12-04T21:52:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:08,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4185395759ca66fa95c987c9f7e36030 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-04T21:52:08,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/d6e9a99a55024ee5a9d0f358d35db2b3 is 1080, key is row0048/info:/1733349128730/Put/seqid=0 2024-12-04T21:52:08,760 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4185395759ca66fa95c987c9f7e36030/info of 4185395759ca66fa95c987c9f7e36030 into c5653b30399f4d369151f9be5a87c9a2(size=40.8 K), total size for store is 59.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
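[Editor's note] The repeated DEBUG trio above traces the split decision: the store's total size (sumSize) is compared against a threshold (sizeToCheck), the threshold grows with the number of regions of the same table on the server, and the split is still vetoed because the largest file's midkey equals its first or last row (splitting there would leave one daughter empty). A hedged sketch of that decision follows, using the cubic growth rule that IncreasingToUpperBoundRegionSplitPolicy is documented to apply (initialSize * regionCount^3, capped at the max file size); the concrete byte values are illustrative, not this test's configuration.

```java
import java.util.Arrays;

// Sketch of the split decision reflected in the log lines above. The cubic
// threshold follows the documented behaviour of IncreasingToUpperBoundRegionSplitPolicy;
// the byte values are assumptions chosen to mirror the logged 16.0 K / 68.9 K figures.
public class SplitDecisionSketch {

    static long sizeToCheck(long initialSize, long maxFileSize, int regionsWithCommonTable) {
        long cubic = initialSize
            * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(cubic, maxFileSize);
    }

    static boolean canSplitOnMidkey(byte[] midkey, byte[] firstRow, byte[] lastRow) {
        // "cannot split ... because midkey is the same as first or last row":
        // such a split point would produce an empty daughter region.
        return !Arrays.equals(midkey, firstRow) && !Arrays.equals(midkey, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 68_900;                                        // ~68.9 K store size
        long threshold = sizeToCheck(16_384, 10L * 1024 * 1024, 1);   // ~16 K with one region
        boolean bigEnough = sumSize > threshold;

        byte[] midkey = "row0001".getBytes();
        byte[] first  = "row0001".getBytes();
        byte[] last   = "row0092".getBytes();

        System.out.println("big enough = " + bigEnough
            + ", splittable on midkey = " + canSplitOnMidkey(midkey, first, last));
    }
}
```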
2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:08,760 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., storeName=4185395759ca66fa95c987c9f7e36030/info, priority=13, startTime=1733349128727; duration=0sec 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/c5653b30399f4d369151f9be5a87c9a2 because midkey is the same as first or last row 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/c5653b30399f4d369151f9be5a87c9a2 because midkey is the same as first or last row 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/c5653b30399f4d369151f9be5a87c9a2 because midkey is the same as first or last row 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:08,760 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4185395759ca66fa95c987c9f7e36030:info 2024-12-04T21:52:08,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741842_1018 (size=17894) 2024-12-04T21:52:08,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741842_1018 (size=17894) 2024-12-04T21:52:08,764 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/d6e9a99a55024ee5a9d0f358d35db2b3 2024-12-04T21:52:08,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/d6e9a99a55024ee5a9d0f358d35db2b3 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/d6e9a99a55024ee5a9d0f358d35db2b3 2024-12-04T21:52:08,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/d6e9a99a55024ee5a9d0f358d35db2b3, entries=12, sequenceid=75, filesize=17.5 K 2024-12-04T21:52:08,776 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for 4185395759ca66fa95c987c9f7e36030 in 22ms, sequenceid=75, compaction requested=true 2024-12-04T21:52:08,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:08,776 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=76.8 K, sizeToCheck=16.0 K 2024-12-04T21:52:08,776 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:08,776 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/c5653b30399f4d369151f9be5a87c9a2 because midkey is the same as first or last row 2024-12-04T21:52:08,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4185395759ca66fa95c987c9f7e36030:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T21:52:08,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:08,776 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:52:08,777 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 78628 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T21:52:08,777 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): 4185395759ca66fa95c987c9f7e36030/info is initiating minor compaction (all files) 2024-12-04T21:52:08,777 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4185395759ca66fa95c987c9f7e36030/info in TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 
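[Editor's note] The flush above follows a write-then-commit pattern: the new HFile is first written under the region's .tmp directory and only afterwards moved into the store directory, so readers never observe a partially written file. Below is a minimal sketch of that pattern with the plain Hadoop FileSystem API; the NameNode address and paths are placeholders, and this is not the HRegionFileSystem code itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Write-then-commit sketch mirroring the flush in the log: the file is created
// under ".tmp" and renamed into the store directory once fully written.
// Paths and the NameNode address below are placeholders for illustration.
public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:36245"); // assumption: same mini-cluster port as the log
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/user/jenkins/region/.tmp/info/new-hfile");
        Path finalFile = new Path("/user/jenkins/region/info/new-hfile");

        // 1. write the complete file under .tmp
        try (var out = fs.create(tmpFile)) {
            out.writeBytes("flushed cells would go here");
        }

        // 2. commit: the HDFS rename is atomic, so readers either see the whole
        //    file in the store directory or nothing at all
        fs.mkdirs(finalFile.getParent());
        if (!fs.rename(tmpFile, finalFile)) {
            throw new java.io.IOException("commit failed for " + finalFile);
        }
        System.out.println("committed " + finalFile);
    }
}
```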
2024-12-04T21:52:08,778 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/c5653b30399f4d369151f9be5a87c9a2, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/8ccb27d78a6b4fb18ab7d5a205816cec, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/d6e9a99a55024ee5a9d0f358d35db2b3] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp, totalSize=76.8 K 2024-12-04T21:52:08,778 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting c5653b30399f4d369151f9be5a87c9a2, keycount=34, bloomtype=ROW, size=40.8 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1733349126626 2024-12-04T21:52:08,778 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8ccb27d78a6b4fb18ab7d5a205816cec, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1733349128705 2024-12-04T21:52:08,778 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting d6e9a99a55024ee5a9d0f358d35db2b3, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733349128730 2024-12-04T21:52:08,789 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4185395759ca66fa95c987c9f7e36030#info#compaction#60 average throughput is 60.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:52:08,790 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/1f6603313295496d9d18b7a68fc9d634 is 1080, key is row0001/info:/1733349126626/Put/seqid=0 2024-12-04T21:52:08,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741843_1019 (size=68843) 2024-12-04T21:52:08,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741843_1019 (size=68843) 2024-12-04T21:52:08,799 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/1f6603313295496d9d18b7a68fc9d634 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634 2024-12-04T21:52:08,804 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4185395759ca66fa95c987c9f7e36030/info of 4185395759ca66fa95c987c9f7e36030 into 1f6603313295496d9d18b7a68fc9d634(size=67.2 K), total size for store is 67.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:08,804 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., storeName=4185395759ca66fa95c987c9f7e36030/info, priority=13, startTime=1733349128776; duration=0sec 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.2 K, sizeToCheck=16.0 K 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634 because midkey is the same as first or last row 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.2 K, sizeToCheck=16.0 K 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634 because midkey is the same as first or last row 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.2 K, sizeToCheck=16.0 K 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634 because midkey is the same as first or last row 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:08,804 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4185395759ca66fa95c987c9f7e36030:info 2024-12-04T21:52:08,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:08,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:09,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:09,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:09,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:10,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:10,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:10,769 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4185395759ca66fa95c987c9f7e36030 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T21:52:10,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/ebe654f92bd9448a8a2ad7c117ec453f is 1080, key is row0060/info:/1733349128756/Put/seqid=0 2024-12-04T21:52:10,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741844_1020 (size=12509) 2024-12-04T21:52:10,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741844_1020 (size=12509) 2024-12-04T21:52:10,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/ebe654f92bd9448a8a2ad7c117ec453f 2024-12-04T21:52:10,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/ebe654f92bd9448a8a2ad7c117ec453f as 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ebe654f92bd9448a8a2ad7c117ec453f 2024-12-04T21:52:10,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ebe654f92bd9448a8a2ad7c117ec453f, entries=7, sequenceid=87, filesize=12.2 K 2024-12-04T21:52:10,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 4185395759ca66fa95c987c9f7e36030 in 22ms, sequenceid=87, compaction requested=false 2024-12-04T21:52:10,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:10,791 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=79.4 K, sizeToCheck=16.0 K 2024-12-04T21:52:10,791 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:10,791 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634 because midkey is the same as first or last row 2024-12-04T21:52:10,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:10,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4185395759ca66fa95c987c9f7e36030 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-04T21:52:10,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/864374b77c344529b83385a070a2a6e7 is 1080, key is row0067/info:/1733349130770/Put/seqid=0 2024-12-04T21:52:10,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741845_1021 (size=17894) 2024-12-04T21:52:10,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741845_1021 (size=17894) 2024-12-04T21:52:10,805 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/864374b77c344529b83385a070a2a6e7 2024-12-04T21:52:10,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/864374b77c344529b83385a070a2a6e7 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/864374b77c344529b83385a070a2a6e7 
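[Editor's note] Each pair of "addStoredBlock" messages in this run is the NameNode acknowledging the same block on both DataNodes of the mini-cluster (127.0.0.1:36595 and 127.0.0.1:38497), i.e. the flushed HFiles are written with replication 2. A small hedged sketch of checking a file's replication from a client; the path below is a placeholder, not one of the test's actual HFiles.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Reports how many replicas a file was written with, which is what the paired
// "addStoredBlock" log messages confirm block by block.
public class ReplicationCheckSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:36245"); // assumption: the log's mini-cluster NameNode
        FileSystem fs = FileSystem.get(conf);

        FileStatus status = fs.getFileStatus(new Path("/user/jenkins/region/info/some-hfile"));
        System.out.println(status.getPath() + " replication=" + status.getReplication()
            + " len=" + status.getLen());
    }
}
```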
2024-12-04T21:52:10,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/864374b77c344529b83385a070a2a6e7, entries=12, sequenceid=102, filesize=17.5 K 2024-12-04T21:52:10,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for 4185395759ca66fa95c987c9f7e36030 in 27ms, sequenceid=102, compaction requested=true 2024-12-04T21:52:10,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:10,818 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=96.9 K, sizeToCheck=16.0 K 2024-12-04T21:52:10,818 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:10,818 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634 because midkey is the same as first or last row 2024-12-04T21:52:10,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4185395759ca66fa95c987c9f7e36030:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T21:52:10,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:10,819 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:52:10,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:10,820 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 99246 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T21:52:10,820 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4185395759ca66fa95c987c9f7e36030 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-04T21:52:10,820 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): 4185395759ca66fa95c987c9f7e36030/info is initiating minor compaction (all files) 2024-12-04T21:52:10,820 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4185395759ca66fa95c987c9f7e36030/info in TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 
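[Editor's note] The PressureAwareThroughputController lines in this run report compaction throughput against a 50.00 MB/second limit and note how often the compactor was slept to stay under it ("slept N time(s)"). The toy sketch below illustrates that sleep-to-cap idea only — track bytes written and sleep whenever the running rate would exceed the configured limit — and is not the HBase controller.

```java
import java.util.concurrent.TimeUnit;

// Toy rate limiter illustrating the "sleep when over the limit" behaviour the
// PressureAwareThroughputController log lines describe (limit 50.00 MB/second).
public class ThroughputCapSketch {
    private final double bytesPerSecondLimit;
    private long bytesWritten;
    private final long startNanos = System.nanoTime();

    ThroughputCapSketch(double mbPerSecond) {
        this.bytesPerSecondLimit = mbPerSecond * 1024 * 1024;
    }

    void control(long justWritten) throws InterruptedException {
        bytesWritten += justWritten;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double allowedSeconds = bytesWritten / bytesPerSecondLimit;
        if (allowedSeconds > elapsedSeconds) {
            // ahead of the allowed rate: sleep off the difference
            long sleepMillis = (long) ((allowedSeconds - elapsedSeconds) * 1000);
            TimeUnit.MILLISECONDS.sleep(sleepMillis);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputCapSketch limiter = new ThroughputCapSketch(50.0);
        for (int i = 0; i < 10; i++) {
            limiter.control(16L * 1024 * 1024); // pretend we wrote a 16 MB chunk of compaction output
        }
        System.out.println("done under the 50 MB/s cap");
    }
}
```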
2024-12-04T21:52:10,820 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ebe654f92bd9448a8a2ad7c117ec453f, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/864374b77c344529b83385a070a2a6e7] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp, totalSize=96.9 K 2024-12-04T21:52:10,820 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1f6603313295496d9d18b7a68fc9d634, keycount=59, bloomtype=ROW, size=67.2 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733349126626 2024-12-04T21:52:10,821 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting ebe654f92bd9448a8a2ad7c117ec453f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733349128756 2024-12-04T21:52:10,821 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 864374b77c344529b83385a070a2a6e7, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733349130770 2024-12-04T21:52:10,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/22fe535c03fb4fbcb4a1f8d63b0b7678 is 1080, key is row0079/info:/1733349130793/Put/seqid=0 2024-12-04T21:52:10,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741846_1022 (size=20064) 2024-12-04T21:52:10,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741846_1022 (size=20064) 2024-12-04T21:52:10,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/22fe535c03fb4fbcb4a1f8d63b0b7678 2024-12-04T21:52:10,834 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4185395759ca66fa95c987c9f7e36030#info#compaction#64 average throughput is 40.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:52:10,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/22fe535c03fb4fbcb4a1f8d63b0b7678 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/22fe535c03fb4fbcb4a1f8d63b0b7678 2024-12-04T21:52:10,834 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/ec5c97d16d3941f6bb8003badf88ea7b is 1080, key is row0001/info:/1733349126626/Put/seqid=0 2024-12-04T21:52:10,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741847_1023 (size=89485) 2024-12-04T21:52:10,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741847_1023 (size=89485) 2024-12-04T21:52:10,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/22fe535c03fb4fbcb4a1f8d63b0b7678, entries=14, sequenceid=119, filesize=19.6 K 2024-12-04T21:52:10,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=4.20 KB/4304 for 4185395759ca66fa95c987c9f7e36030 in 21ms, sequenceid=119, compaction requested=false 2024-12-04T21:52:10,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:10,841 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=116.5 K, sizeToCheck=16.0 K 2024-12-04T21:52:10,841 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:10,841 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634 because midkey is the same as first or last row 2024-12-04T21:52:10,842 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/ec5c97d16d3941f6bb8003badf88ea7b as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ec5c97d16d3941f6bb8003badf88ea7b 2024-12-04T21:52:10,848 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4185395759ca66fa95c987c9f7e36030/info of 4185395759ca66fa95c987c9f7e36030 into ec5c97d16d3941f6bb8003badf88ea7b(size=87.4 K), total size for store is 107.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T21:52:10,848 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4185395759ca66fa95c987c9f7e36030: 2024-12-04T21:52:10,848 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., storeName=4185395759ca66fa95c987c9f7e36030/info, priority=13, startTime=1733349130818; duration=0sec 2024-12-04T21:52:10,848 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=107.0 K, sizeToCheck=16.0 K 2024-12-04T21:52:10,848 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:10,848 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=107.0 K, sizeToCheck=16.0 K 2024-12-04T21:52:10,848 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:10,848 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=107.0 K, sizeToCheck=16.0 K 2024-12-04T21:52:10,848 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-04T21:52:10,849 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:10,849 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:10,849 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4185395759ca66fa95c987c9f7e36030:info 2024-12-04T21:52:10,850 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45219 {}] assignment.AssignmentManager(1363): Split request from bb3046a53f79,46213,1733349115759, parent={ENCODED => 4185395759ca66fa95c987c9f7e36030, NAME => 'TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-04T21:52:10,854 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45219 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=bb3046a53f79,46213,1733349115759 2024-12-04T21:52:10,857 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45219 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4185395759ca66fa95c987c9f7e36030, daughterA=46be0065dc8998c42a2f9522ae13b7f5, daughterB=f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:10,858 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure 
table=TestLogRolling-testLogRolling, parent=4185395759ca66fa95c987c9f7e36030, daughterA=46be0065dc8998c42a2f9522ae13b7f5, daughterB=f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:10,858 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4185395759ca66fa95c987c9f7e36030, daughterA=46be0065dc8998c42a2f9522ae13b7f5, daughterB=f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:10,858 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4185395759ca66fa95c987c9f7e36030, daughterA=46be0065dc8998c42a2f9522ae13b7f5, daughterB=f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:10,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4185395759ca66fa95c987c9f7e36030, UNASSIGN}] 2024-12-04T21:52:10,865 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4185395759ca66fa95c987c9f7e36030, UNASSIGN 2024-12-04T21:52:10,866 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=4185395759ca66fa95c987c9f7e36030, regionState=CLOSING, regionLocation=bb3046a53f79,46213,1733349115759 2024-12-04T21:52:10,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4185395759ca66fa95c987c9f7e36030, UNASSIGN because future has completed 2024-12-04T21:52:10,869 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-04T21:52:10,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4185395759ca66fa95c987c9f7e36030, server=bb3046a53f79,46213,1733349115759}] 2024-12-04T21:52:10,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:10,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:11,029 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,029 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-04T21:52:11,030 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 4185395759ca66fa95c987c9f7e36030, disabling compactions & flushes 2024-12-04T21:52:11,030 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:52:11,030 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:52:11,030 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. after waiting 0 ms 2024-12-04T21:52:11,030 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 
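[Editor's note] The recurring WARN stack traces from Close-WAL-Writer-0 come from RecoverLeaseFSUtils invoking DistributedFileSystem.isFileClosed reflectively while trying to recover the lease on old WAL files; because the underlying DFSClient has already been shut down ("Filesystem closed"), each attempt fails with the IOException wrapped in an InvocationTargetException, and the utility retries roughly once per second. Below is a minimal sketch of that reflective-call-and-unwrap pattern; the filesystem type here is a stand-in, not the HBase utility or the real DistributedFileSystem.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Sketch of the pattern behind the repeated WARNs: an optional filesystem method
// is looked up and invoked via reflection, and a failure inside the invoked method
// surfaces as an InvocationTargetException wrapping the real IOException
// ("Filesystem closed" in the log). The Fs type is a stand-in for illustration.
public class ReflectiveIsFileClosedSketch {

    interface Fs {
        boolean isFileClosed(String path) throws IOException;
    }

    public static class ClosedFs implements Fs {
        @Override
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed"); // mimics the shut-down DFSClient
        }
    }

    static boolean isFileClosed(Object fs, String path) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", String.class);
            return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
            return false; // method not available on this filesystem implementation
        } catch (InvocationTargetException e) {
            // the invoked method itself threw; this corresponds to the logged
            // "Failed invocation ... Caused by: java.io.IOException: Filesystem closed"
            System.err.println("failed invocation: " + e.getCause());
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println("closed = " + isFileClosed(new ClosedFs(), "/wal/old-file"));
    }
}
```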
2024-12-04T21:52:11,030 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 4185395759ca66fa95c987c9f7e36030 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-04T21:52:11,038 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/a6a5b130fb4a4ad987dc58747f561d52 is 1080, key is row0093/info:/1733349130821/Put/seqid=0 2024-12-04T21:52:11,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741848_1024 (size=9270) 2024-12-04T21:52:11,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741848_1024 (size=9270) 2024-12-04T21:52:11,045 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/a6a5b130fb4a4ad987dc58747f561d52 2024-12-04T21:52:11,051 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/.tmp/info/a6a5b130fb4a4ad987dc58747f561d52 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/a6a5b130fb4a4ad987dc58747f561d52 2024-12-04T21:52:11,056 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/a6a5b130fb4a4ad987dc58747f561d52, entries=4, sequenceid=127, filesize=9.1 K 2024-12-04T21:52:11,057 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 4185395759ca66fa95c987c9f7e36030 in 27ms, sequenceid=127, compaction requested=true 2024-12-04T21:52:11,059 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/514ad2b7cf1946fea38e375a4ded3694, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/98f26eb26be94afcb8f8c87bb4fab216, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/c5653b30399f4d369151f9be5a87c9a2, 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/764d02bd376d493585246d69e5735de3, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/8ccb27d78a6b4fb18ab7d5a205816cec, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/d6e9a99a55024ee5a9d0f358d35db2b3, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ebe654f92bd9448a8a2ad7c117ec453f, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/864374b77c344529b83385a070a2a6e7] to archive 2024-12-04T21:52:11,060 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-04T21:52:11,061 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/514ad2b7cf1946fea38e375a4ded3694 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/514ad2b7cf1946fea38e375a4ded3694 2024-12-04T21:52:11,063 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/98f26eb26be94afcb8f8c87bb4fab216 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/98f26eb26be94afcb8f8c87bb4fab216 2024-12-04T21:52:11,064 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/c5653b30399f4d369151f9be5a87c9a2 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/c5653b30399f4d369151f9be5a87c9a2 2024-12-04T21:52:11,065 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/764d02bd376d493585246d69e5735de3 to 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/764d02bd376d493585246d69e5735de3 2024-12-04T21:52:11,067 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/8ccb27d78a6b4fb18ab7d5a205816cec to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/8ccb27d78a6b4fb18ab7d5a205816cec 2024-12-04T21:52:11,068 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/1f6603313295496d9d18b7a68fc9d634 2024-12-04T21:52:11,069 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/d6e9a99a55024ee5a9d0f358d35db2b3 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/d6e9a99a55024ee5a9d0f358d35db2b3 2024-12-04T21:52:11,070 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ebe654f92bd9448a8a2ad7c117ec453f to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ebe654f92bd9448a8a2ad7c117ec453f 2024-12-04T21:52:11,071 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/864374b77c344529b83385a070a2a6e7 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/864374b77c344529b83385a070a2a6e7 2024-12-04T21:52:11,077 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
2024-12-04T21:52:11,078 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 2024-12-04T21:52:11,078 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 4185395759ca66fa95c987c9f7e36030: Waiting for close lock at 1733349131030Running coprocessor pre-close hooks at 1733349131030Disabling compacts and flushes for region at 1733349131030Disabling writes for close at 1733349131030Obtaining lock to block concurrent updates at 1733349131030Preparing flush snapshotting stores in 4185395759ca66fa95c987c9f7e36030 at 1733349131030Finished memstore snapshotting TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., syncing WAL and waiting on mvcc, flushsize=dataSize=4304, getHeapSize=4848, getOffHeapSize=0, getCellsCount=4 at 1733349131031 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. at 1733349131032 (+1 ms)Flushing 4185395759ca66fa95c987c9f7e36030/info: creating writer at 1733349131032Flushing 4185395759ca66fa95c987c9f7e36030/info: appending metadata at 1733349131037 (+5 ms)Flushing 4185395759ca66fa95c987c9f7e36030/info: closing flushed file at 1733349131037Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a76c5bf: reopening flushed file at 1733349131050 (+13 ms)Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 4185395759ca66fa95c987c9f7e36030 in 27ms, sequenceid=127, compaction requested=true at 1733349131057 (+7 ms)Writing region close event to WAL at 1733349131074 (+17 ms)Running coprocessor post-close hooks at 1733349131078 (+4 ms)Closed at 1733349131078 2024-12-04T21:52:11,080 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,081 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=4185395759ca66fa95c987c9f7e36030, regionState=CLOSED 2024-12-04T21:52:11,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4185395759ca66fa95c987c9f7e36030, server=bb3046a53f79,46213,1733349115759 because future has completed 2024-12-04T21:52:11,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-04T21:52:11,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 4185395759ca66fa95c987c9f7e36030, server=bb3046a53f79,46213,1733349115759 in 215 msec 2024-12-04T21:52:11,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-04T21:52:11,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4185395759ca66fa95c987c9f7e36030, UNASSIGN in 222 msec 2024-12-04T21:52:11,097 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:11,101 INFO [PEWorker-5 
{}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=4185395759ca66fa95c987c9f7e36030, threads=3 2024-12-04T21:52:11,103 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ec5c97d16d3941f6bb8003badf88ea7b for region: 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,103 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/22fe535c03fb4fbcb4a1f8d63b0b7678 for region: 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,103 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/a6a5b130fb4a4ad987dc58747f561d52 for region: 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,112 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/a6a5b130fb4a4ad987dc58747f561d52, top=true 2024-12-04T21:52:11,112 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/22fe535c03fb4fbcb4a1f8d63b0b7678, top=true 2024-12-04T21:52:11,121 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-22fe535c03fb4fbcb4a1f8d63b0b7678 for child: f4d8f05e8a76a38fda262a0f09617252, parent: 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,121 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-a6a5b130fb4a4ad987dc58747f561d52 for child: f4d8f05e8a76a38fda262a0f09617252, parent: 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,122 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/a6a5b130fb4a4ad987dc58747f561d52 for region: 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,122 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/22fe535c03fb4fbcb4a1f8d63b0b7678 for region: 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741849_1025 (size=27) 2024-12-04T21:52:11,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741849_1025 (size=27) 2024-12-04T21:52:11,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741850_1026 (size=27) 2024-12-04T21:52:11,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741850_1026 (size=27) 2024-12-04T21:52:11,134 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ec5c97d16d3941f6bb8003badf88ea7b for region: 4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:11,137 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 4185395759ca66fa95c987c9f7e36030 Daughter A: [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030] storefiles, Daughter B: [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-22fe535c03fb4fbcb4a1f8d63b0b7678, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-a6a5b130fb4a4ad987dc58747f561d52, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030] storefiles. 
2024-12-04T21:52:11,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741851_1027 (size=71) 2024-12-04T21:52:11,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741851_1027 (size=71) 2024-12-04T21:52:11,147 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:11,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741852_1028 (size=71) 2024-12-04T21:52:11,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741852_1028 (size=71) 2024-12-04T21:52:11,160 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:11,169 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-04T21:52:11,171 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-04T21:52:11,173 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733349131172"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733349131172"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733349131172"}]},"ts":"1733349131172"} 2024-12-04T21:52:11,173 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733349131172"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733349131172"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733349131172"}]},"ts":"1733349131172"} 2024-12-04T21:52:11,173 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733349131172"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733349131172"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733349131172"}]},"ts":"1733349131172"} 2024-12-04T21:52:11,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46be0065dc8998c42a2f9522ae13b7f5, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=f4d8f05e8a76a38fda262a0f09617252, ASSIGN}] 2024-12-04T21:52:11,188 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46be0065dc8998c42a2f9522ae13b7f5, ASSIGN 2024-12-04T21:52:11,188 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f4d8f05e8a76a38fda262a0f09617252, ASSIGN 2024-12-04T21:52:11,189 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f4d8f05e8a76a38fda262a0f09617252, ASSIGN; state=SPLITTING_NEW, location=bb3046a53f79,46213,1733349115759; forceNewPlan=false, retain=false 2024-12-04T21:52:11,189 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46be0065dc8998c42a2f9522ae13b7f5, ASSIGN; state=SPLITTING_NEW, location=bb3046a53f79,46213,1733349115759; forceNewPlan=false, retain=false 2024-12-04T21:52:11,340 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=f4d8f05e8a76a38fda262a0f09617252, regionState=OPENING, regionLocation=bb3046a53f79,46213,1733349115759 2024-12-04T21:52:11,340 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=46be0065dc8998c42a2f9522ae13b7f5, regionState=OPENING, regionLocation=bb3046a53f79,46213,1733349115759 2024-12-04T21:52:11,343 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f4d8f05e8a76a38fda262a0f09617252, ASSIGN because future has completed 2024-12-04T21:52:11,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure f4d8f05e8a76a38fda262a0f09617252, server=bb3046a53f79,46213,1733349115759}] 2024-12-04T21:52:11,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46be0065dc8998c42a2f9522ae13b7f5, ASSIGN because future has completed 2024-12-04T21:52:11,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 46be0065dc8998c42a2f9522ae13b7f5, server=bb3046a53f79,46213,1733349115759}] 2024-12-04T21:52:11,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:11,508 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 
2024-12-04T21:52:11,509 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 46be0065dc8998c42a2f9522ae13b7f5, NAME => 'TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-04T21:52:11,509 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,509 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:52:11,509 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,509 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,511 INFO [StoreOpener-46be0065dc8998c42a2f9522ae13b7f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,513 INFO [StoreOpener-46be0065dc8998c42a2f9522ae13b7f5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46be0065dc8998c42a2f9522ae13b7f5 columnFamilyName info 2024-12-04T21:52:11,513 DEBUG [StoreOpener-46be0065dc8998c42a2f9522ae13b7f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:11,524 DEBUG [StoreOpener-46be0065dc8998c42a2f9522ae13b7f5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030->hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ec5c97d16d3941f6bb8003badf88ea7b-bottom 2024-12-04T21:52:11,525 INFO [StoreOpener-46be0065dc8998c42a2f9522ae13b7f5-1 {}] regionserver.HStore(327): Store=46be0065dc8998c42a2f9522ae13b7f5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:52:11,525 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,526 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,527 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,527 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,527 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,529 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,530 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 46be0065dc8998c42a2f9522ae13b7f5; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=784775, jitterRate=-0.0021079182624816895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T21:52:11,530 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:11,530 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 46be0065dc8998c42a2f9522ae13b7f5: Running coprocessor pre-open hook at 1733349131510Writing region info on filesystem at 1733349131510Initializing all the Stores at 1733349131511 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349131511Cleaning up temporary data from old regions at 1733349131527 (+16 ms)Running coprocessor post-open hooks at 1733349131530 (+3 ms)Region opened successfully at 1733349131530 2024-12-04T21:52:11,531 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5., pid=13, masterSystemTime=1733349131499 2024-12-04T21:52:11,531 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 
46be0065dc8998c42a2f9522ae13b7f5:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T21:52:11,531 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:11,531 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-04T21:52:11,532 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 2024-12-04T21:52:11,532 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): 46be0065dc8998c42a2f9522ae13b7f5/info is initiating minor compaction (all files) 2024-12-04T21:52:11,532 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 46be0065dc8998c42a2f9522ae13b7f5/info in TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 2024-12-04T21:52:11,532 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030->hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ec5c97d16d3941f6bb8003badf88ea7b-bottom] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/.tmp, totalSize=87.4 K 2024-12-04T21:52:11,533 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030, keycount=39, bloomtype=ROW, size=87.4 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733349126626 2024-12-04T21:52:11,533 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 2024-12-04T21:52:11,533 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 2024-12-04T21:52:11,533 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 
2024-12-04T21:52:11,533 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => f4d8f05e8a76a38fda262a0f09617252, NAME => 'TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-04T21:52:11,534 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,534 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:52:11,534 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,534 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=46be0065dc8998c42a2f9522ae13b7f5, regionState=OPEN, openSeqNum=131, regionLocation=bb3046a53f79,46213,1733349115759 2024-12-04T21:52:11,534 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,535 INFO [StoreOpener-f4d8f05e8a76a38fda262a0f09617252-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,536 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-04T21:52:11,536 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-04T21:52:11,536 INFO [StoreOpener-f4d8f05e8a76a38fda262a0f09617252-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f4d8f05e8a76a38fda262a0f09617252 columnFamilyName info 2024-12-04T21:52:11,536 DEBUG [StoreOpener-f4d8f05e8a76a38fda262a0f09617252-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:11,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-12-04T21:52:11,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 46be0065dc8998c42a2f9522ae13b7f5, server=bb3046a53f79,46213,1733349115759 because future has completed 2024-12-04T21:52:11,537 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45219 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=bb3046a53f79,46213,1733349115759, table=TestLogRolling-testLogRolling, region=46be0065dc8998c42a2f9522ae13b7f5. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-04T21:52:11,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-12-04T21:52:11,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 46be0065dc8998c42a2f9522ae13b7f5, server=bb3046a53f79,46213,1733349115759 in 191 msec 2024-12-04T21:52:11,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46be0065dc8998c42a2f9522ae13b7f5, ASSIGN in 353 msec 2024-12-04T21:52:11,547 DEBUG [StoreOpener-f4d8f05e8a76a38fda262a0f09617252-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-22fe535c03fb4fbcb4a1f8d63b0b7678 2024-12-04T21:52:11,551 DEBUG [StoreOpener-f4d8f05e8a76a38fda262a0f09617252-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-a6a5b130fb4a4ad987dc58747f561d52 2024-12-04T21:52:11,555 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46be0065dc8998c42a2f9522ae13b7f5#info#compaction#66 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:52:11,555 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/.tmp/info/4cf5f55227f943e0967743f7bc91bf76 is 1080, key is row0001/info:/1733349126626/Put/seqid=0 2024-12-04T21:52:11,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/info/0cbcfe0b828749439aeb49226231e41c is 193, key is TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252./info:regioninfo/1733349131340/Put/seqid=0 2024-12-04T21:52:11,555 DEBUG [StoreOpener-f4d8f05e8a76a38fda262a0f09617252-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030->hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ec5c97d16d3941f6bb8003badf88ea7b-top 2024-12-04T21:52:11,556 INFO [StoreOpener-f4d8f05e8a76a38fda262a0f09617252-1 {}] regionserver.HStore(327): Store=f4d8f05e8a76a38fda262a0f09617252/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:52:11,556 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 
f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,556 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,558 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,558 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,558 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741853_1029 (size=70862) 2024-12-04T21:52:11,560 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741854_1030 (size=9847) 2024-12-04T21:52:11,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741854_1030 (size=9847) 2024-12-04T21:52:11,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741853_1029 (size=70862) 2024-12-04T21:52:11,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/info/0cbcfe0b828749439aeb49226231e41c 2024-12-04T21:52:11,561 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened f4d8f05e8a76a38fda262a0f09617252; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769894, jitterRate=-0.02103005349636078}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-04T21:52:11,561 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:11,561 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for f4d8f05e8a76a38fda262a0f09617252: Running coprocessor pre-open hook at 1733349131534Writing region info on filesystem at 1733349131534Initializing all the Stores at 1733349131535 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349131535Cleaning up temporary data from old regions at 1733349131558 (+23 ms)Running coprocessor post-open hooks at 1733349131561 (+3 ms)Region opened successfully at 1733349131561 2024-12-04T21:52:11,562 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., pid=12, masterSystemTime=1733349131499 2024-12-04T21:52:11,562 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store f4d8f05e8a76a38fda262a0f09617252:info, priority=-2147483648, current under compaction store size is 2 2024-12-04T21:52:11,562 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:11,562 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:52:11,563 INFO [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 2024-12-04T21:52:11,564 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.HStore(1541): f4d8f05e8a76a38fda262a0f09617252/info is initiating minor compaction (all files) 2024-12-04T21:52:11,564 INFO [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f4d8f05e8a76a38fda262a0f09617252/info in TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 
2024-12-04T21:52:11,564 INFO [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030->hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ec5c97d16d3941f6bb8003badf88ea7b-top, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-22fe535c03fb4fbcb4a1f8d63b0b7678, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-a6a5b130fb4a4ad987dc58747f561d52] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp, totalSize=116.0 K 2024-12-04T21:52:11,565 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] compactions.Compactor(225): Compacting ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030, keycount=39, bloomtype=ROW, size=87.4 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1733349126626 2024-12-04T21:52:11,565 DEBUG [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 2024-12-04T21:52:11,565 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-22fe535c03fb4fbcb4a1f8d63b0b7678, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733349130793 2024-12-04T21:52:11,565 INFO [RS_OPEN_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 
2024-12-04T21:52:11,566 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-a6a5b130fb4a4ad987dc58747f561d52, keycount=4, bloomtype=ROW, size=9.1 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733349130821 2024-12-04T21:52:11,566 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=f4d8f05e8a76a38fda262a0f09617252, regionState=OPEN, openSeqNum=131, regionLocation=bb3046a53f79,46213,1733349115759 2024-12-04T21:52:11,567 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/.tmp/info/4cf5f55227f943e0967743f7bc91bf76 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/info/4cf5f55227f943e0967743f7bc91bf76 2024-12-04T21:52:11,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure f4d8f05e8a76a38fda262a0f09617252, server=bb3046a53f79,46213,1733349115759 because future has completed 2024-12-04T21:52:11,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-04T21:52:11,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure f4d8f05e8a76a38fda262a0f09617252, server=bb3046a53f79,46213,1733349115759 in 226 msec 2024-12-04T21:52:11,574 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 46be0065dc8998c42a2f9522ae13b7f5/info of 46be0065dc8998c42a2f9522ae13b7f5 into 4cf5f55227f943e0967743f7bc91bf76(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-04T21:52:11,574 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 46be0065dc8998c42a2f9522ae13b7f5: 2024-12-04T21:52:11,574 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5., storeName=46be0065dc8998c42a2f9522ae13b7f5/info, priority=15, startTime=1733349131531; duration=0sec 2024-12-04T21:52:11,574 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:11,574 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46be0065dc8998c42a2f9522ae13b7f5:info 2024-12-04T21:52:11,576 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-12-04T21:52:11,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f4d8f05e8a76a38fda262a0f09617252, ASSIGN in 387 msec 2024-12-04T21:52:11,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4185395759ca66fa95c987c9f7e36030, daughterA=46be0065dc8998c42a2f9522ae13b7f5, daughterB=f4d8f05e8a76a38fda262a0f09617252 in 723 msec 2024-12-04T21:52:11,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/ns/f3c40e4379734003bf1a4be76838fe73 is 43, key is default/ns:d/1733349116537/Put/seqid=0 2024-12-04T21:52:11,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741855_1031 (size=5153) 2024-12-04T21:52:11,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741855_1031 (size=5153) 2024-12-04T21:52:11,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/ns/f3c40e4379734003bf1a4be76838fe73 2024-12-04T21:52:11,588 INFO [RS:0;bb3046a53f79:46213-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f4d8f05e8a76a38fda262a0f09617252#info#compaction#69 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:52:11,588 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/578df3b2c2804f47ae8393cb8def757d is 1080, key is row0062/info:/1733349128760/Put/seqid=0 2024-12-04T21:52:11,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741856_1032 (size=42984) 2024-12-04T21:52:11,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741856_1032 (size=42984) 2024-12-04T21:52:11,598 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/578df3b2c2804f47ae8393cb8def757d as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/578df3b2c2804f47ae8393cb8def757d 2024-12-04T21:52:11,604 INFO [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f4d8f05e8a76a38fda262a0f09617252/info of f4d8f05e8a76a38fda262a0f09617252 into 578df3b2c2804f47ae8393cb8def757d(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T21:52:11,604 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:11,604 INFO [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., storeName=f4d8f05e8a76a38fda262a0f09617252/info, priority=13, startTime=1733349131562; duration=0sec 2024-12-04T21:52:11,604 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:11,604 DEBUG [RS:0;bb3046a53f79:46213-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f4d8f05e8a76a38fda262a0f09617252:info 2024-12-04T21:52:11,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/table/d52bbc89f03446698800a496007768eb is 65, key is TestLogRolling-testLogRolling/table:state/1733349116965/Put/seqid=0 2024-12-04T21:52:11,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741857_1033 (size=5340) 2024-12-04T21:52:11,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741857_1033 (size=5340) 2024-12-04T21:52:11,612 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/table/d52bbc89f03446698800a496007768eb 2024-12-04T21:52:11,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/info/0cbcfe0b828749439aeb49226231e41c as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/info/0cbcfe0b828749439aeb49226231e41c 2024-12-04T21:52:11,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/info/0cbcfe0b828749439aeb49226231e41c, entries=30, sequenceid=17, filesize=9.6 K 2024-12-04T21:52:11,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/ns/f3c40e4379734003bf1a4be76838fe73 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/ns/f3c40e4379734003bf1a4be76838fe73 2024-12-04T21:52:11,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/ns/f3c40e4379734003bf1a4be76838fe73, entries=2, sequenceid=17, filesize=5.0 K 2024-12-04T21:52:11,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/table/d52bbc89f03446698800a496007768eb as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/table/d52bbc89f03446698800a496007768eb 2024-12-04T21:52:11,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/table/d52bbc89f03446698800a496007768eb, entries=2, sequenceid=17, filesize=5.2 K 2024-12-04T21:52:11,635 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 98ms, sequenceid=17, compaction requested=false 2024-12-04T21:52:11,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-04T21:52:11,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:11,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:12,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:12,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:58462 deadline: 1733349142831, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. 
is not online on bb3046a53f79,46213,1733349115759 2024-12-04T21:52:12,842 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., hostname=bb3046a53f79,46213,1733349115759, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., hostname=bb3046a53f79,46213,1733349115759, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. is not online on bb3046a53f79,46213,1733349115759 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-04T21:52:12,843 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., hostname=bb3046a53f79,46213,1733349115759, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030. is not online on bb3046a53f79,46213,1733349115759 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-04T21:52:12,843 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733349116590.4185395759ca66fa95c987c9f7e36030., hostname=bb3046a53f79,46213,1733349115759, seqNum=2 from cache 2024-12-04T21:52:12,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:12,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:13,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:13,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:13,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:14,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:14,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:15,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:15,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:16,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:16,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:16,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:16,617 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-04T21:52:16,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,649 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:16,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-04T21:52:17,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:17,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:17,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:18,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:18,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:18,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:19,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:19,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:19,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:20,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:20,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:20,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:21,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:21,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:21,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:22,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:22,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:22,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:22,867 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., hostname=bb3046a53f79,46213,1733349115759, seqNum=131] 2024-12-04T21:52:22,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:22,881 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-04T21:52:22,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/53789a08f54c408488759e4ff35163c0 is 1080, key is row0097/info:/1733349142868/Put/seqid=0 2024-12-04T21:52:22,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741858_1034 (size=12516) 2024-12-04T21:52:22,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741858_1034 (size=12516) 2024-12-04T21:52:22,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/53789a08f54c408488759e4ff35163c0 2024-12-04T21:52:22,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/53789a08f54c408488759e4ff35163c0 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53789a08f54c408488759e4ff35163c0 2024-12-04T21:52:22,902 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53789a08f54c408488759e4ff35163c0, entries=7, sequenceid=141, filesize=12.2 K 2024-12-04T21:52:22,903 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for f4d8f05e8a76a38fda262a0f09617252 in 22ms, sequenceid=141, compaction requested=false 2024-12-04T21:52:22,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252: 
2024-12-04T21:52:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:22,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-04T21:52:22,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/20fb6bf04a1d48d39ef171cf9bc37bbc is 1080, key is row0104/info:/1733349142882/Put/seqid=0 2024-12-04T21:52:22,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741859_1035 (size=16828) 2024-12-04T21:52:22,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741859_1035 (size=16828) 2024-12-04T21:52:22,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/20fb6bf04a1d48d39ef171cf9bc37bbc 2024-12-04T21:52:22,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/20fb6bf04a1d48d39ef171cf9bc37bbc as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/20fb6bf04a1d48d39ef171cf9bc37bbc 2024-12-04T21:52:22,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/20fb6bf04a1d48d39ef171cf9bc37bbc, entries=11, sequenceid=155, filesize=16.4 K 2024-12-04T21:52:22,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for f4d8f05e8a76a38fda262a0f09617252 in 43ms, sequenceid=155, compaction requested=true 2024-12-04T21:52:22,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:22,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f4d8f05e8a76a38fda262a0f09617252:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T21:52:22,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:22,947 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:52:22,948 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 72328 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-12-04T21:52:22,949 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): f4d8f05e8a76a38fda262a0f09617252/info is initiating minor compaction (all files) 2024-12-04T21:52:22,949 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f4d8f05e8a76a38fda262a0f09617252/info in TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 2024-12-04T21:52:22,949 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/578df3b2c2804f47ae8393cb8def757d, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53789a08f54c408488759e4ff35163c0, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/20fb6bf04a1d48d39ef171cf9bc37bbc] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp, totalSize=70.6 K 2024-12-04T21:52:22,949 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 578df3b2c2804f47ae8393cb8def757d, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733349128760 2024-12-04T21:52:22,950 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 53789a08f54c408488759e4ff35163c0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733349142868 2024-12-04T21:52:22,950 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 20fb6bf04a1d48d39ef171cf9bc37bbc, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733349142882 2024-12-04T21:52:22,963 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f4d8f05e8a76a38fda262a0f09617252#info#compaction#73 average throughput is 54.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:52:22,963 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/53e786a33828420fa9825aa12bcfef03 is 1080, key is row0062/info:/1733349128760/Put/seqid=0 2024-12-04T21:52:22,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741860_1036 (size=62558) 2024-12-04T21:52:22,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741860_1036 (size=62558) 2024-12-04T21:52:22,984 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/53e786a33828420fa9825aa12bcfef03 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53e786a33828420fa9825aa12bcfef03 2024-12-04T21:52:22,991 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f4d8f05e8a76a38fda262a0f09617252/info of f4d8f05e8a76a38fda262a0f09617252 into 53e786a33828420fa9825aa12bcfef03(size=61.1 K), total size for store is 61.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T21:52:22,991 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:22,991 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., storeName=f4d8f05e8a76a38fda262a0f09617252/info, priority=13, startTime=1733349142947; duration=0sec 2024-12-04T21:52:22,991 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:22,991 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f4d8f05e8a76a38fda262a0f09617252:info 2024-12-04T21:52:23,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:23,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:23,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:24,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:24,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:24,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-04T21:52:24,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252
2024-12-04T21:52:24,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-12-04T21:52:24,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/a76ab38637c44bf5a4dc6b72f4e53dee is 1080, key is row0115/info:/1733349142905/Put/seqid=0
2024-12-04T21:52:24,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741861_1037 (size=21156)
2024-12-04T21:52:24,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741861_1037 (size=21156)
2024-12-04T21:52:24,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/a76ab38637c44bf5a4dc6b72f4e53dee
2024-12-04T21:52:24,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/a76ab38637c44bf5a4dc6b72f4e53dee as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/a76ab38637c44bf5a4dc6b72f4e53dee
2024-12-04T21:52:24,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/a76ab38637c44bf5a4dc6b72f4e53dee, entries=15, sequenceid=174, filesize=20.7 K
2024-12-04T21:52:24,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for f4d8f05e8a76a38fda262a0f09617252 in 23ms, sequenceid=174, compaction requested=false
2024-12-04T21:52:24,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:24,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252
2024-12-04T21:52:24,969 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-12-04T21:52:24,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/76c49853e44c4377b95fa18541c0943e is 1080, key is row0130/info:/1733349144946/Put/seqid=0
2024-12-04T21:52:24,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741862_1038 (size=16828)
2024-12-04T21:52:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741862_1038 (size=16828)
2024-12-04T21:52:24,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/76c49853e44c4377b95fa18541c0943e
2024-12-04T21:52:24,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/76c49853e44c4377b95fa18541c0943e as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/76c49853e44c4377b95fa18541c0943e
2024-12-04T21:52:24,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/76c49853e44c4377b95fa18541c0943e, entries=11, sequenceid=188, filesize=16.4 K
2024-12-04T21:52:24,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for f4d8f05e8a76a38fda262a0f09617252 in 28ms, sequenceid=188, compaction requested=true
2024-12-04T21:52:24,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:24,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f4d8f05e8a76a38fda262a0f09617252:info, priority=-2147483648, current under compaction store size is 1
2024-12-04T21:52:24,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T21:52:24,997 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-04T21:52:24,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252
2024-12-04T21:52:24,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-12-04T21:52:24,999 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100542 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-04T21:52:24,999 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): f4d8f05e8a76a38fda262a0f09617252/info is initiating minor compaction (all files)
2024-12-04T21:52:24,999 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f4d8f05e8a76a38fda262a0f09617252/info in TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.
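Annotation: the records above trace the memstore flush path: a new HFile is written under .tmp, committed into the info store, and once a third store file is present the flush ends with "compaction requested=true" and the short-compactions thread selects all three eligible files (100542 bytes in total). As a rough illustration only, the Java sketch below mimics that flush-then-request-compaction decision; the class name StoreFileTracker and the fixed threshold of 3 files are assumptions made for the example, not HBase's actual implementation.

// Illustrative sketch, not HBase code: request a compaction once enough store files accumulate.
import java.util.ArrayList;
import java.util.List;

public class StoreFileTracker {
    // Hypothetical threshold; in the log above, compaction is requested once 3 files are eligible.
    private static final int COMPACTION_FILE_THRESHOLD = 3;
    private final List<Long> storeFileSizes = new ArrayList<>();

    // Record a newly flushed file and report whether a compaction should be requested.
    public boolean onFlushCompleted(long newFileSizeBytes) {
        storeFileSizes.add(newFileSizeBytes);
        return storeFileSizes.size() >= COMPACTION_FILE_THRESHOLD;
    }

    // Select every eligible file, mirroring the "minor compaction (all files)" case above.
    public List<Long> selectAllFiles() {
        List<Long> selection = new ArrayList<>(storeFileSizes);
        storeFileSizes.clear();
        return selection;
    }

    public static void main(String[] args) {
        StoreFileTracker store = new StoreFileTracker();
        // File sizes consistent with the log: 62558 + 21156 + 16828 = 100542 bytes.
        long[] flushedSizes = {62558L, 21156L, 16828L};
        for (long size : flushedSizes) {
            boolean compact = store.onFlushCompleted(size);
            System.out.println("flushed " + size + " bytes, compaction requested=" + compact);
            if (compact) {
                System.out.println("compacting " + store.selectAllFiles().size() + " files");
            }
        }
    }
}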
2024-12-04T21:52:24,999 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53e786a33828420fa9825aa12bcfef03, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/a76ab38637c44bf5a4dc6b72f4e53dee, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/76c49853e44c4377b95fa18541c0943e] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp, totalSize=98.2 K 2024-12-04T21:52:25,000 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 53e786a33828420fa9825aa12bcfef03, keycount=53, bloomtype=ROW, size=61.1 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733349128760 2024-12-04T21:52:25,001 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting a76ab38637c44bf5a4dc6b72f4e53dee, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733349142905 2024-12-04T21:52:25,001 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 76c49853e44c4377b95fa18541c0943e, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733349144946 2024-12-04T21:52:25,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/047a0aa0632f47c6bea66b0cef80a413 is 1080, key is row0141/info:/1733349144971/Put/seqid=0 2024-12-04T21:52:25,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:25,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:52:25,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741863_1039 (size=17906) 2024-12-04T21:52:25,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741863_1039 (size=17906) 2024-12-04T21:52:25,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/047a0aa0632f47c6bea66b0cef80a413 2024-12-04T21:52:25,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/047a0aa0632f47c6bea66b0cef80a413 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/047a0aa0632f47c6bea66b0cef80a413 2024-12-04T21:52:25,043 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f4d8f05e8a76a38fda262a0f09617252#info#compaction#77 average throughput is 27.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:52:25,044 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/c5db1824f9344636bf5807b9a9ef983e is 1080, key is row0062/info:/1733349128760/Put/seqid=0 2024-12-04T21:52:25,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/047a0aa0632f47c6bea66b0cef80a413, entries=12, sequenceid=203, filesize=17.5 K 2024-12-04T21:52:25,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for f4d8f05e8a76a38fda262a0f09617252 in 52ms, sequenceid=203, compaction requested=false 2024-12-04T21:52:25,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:25,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741864_1040 (size=90765) 2024-12-04T21:52:25,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741864_1040 (size=90765) 2024-12-04T21:52:25,066 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/c5db1824f9344636bf5807b9a9ef983e as 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/c5db1824f9344636bf5807b9a9ef983e 2024-12-04T21:52:25,073 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f4d8f05e8a76a38fda262a0f09617252/info of f4d8f05e8a76a38fda262a0f09617252 into c5db1824f9344636bf5807b9a9ef983e(size=88.6 K), total size for store is 106.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T21:52:25,073 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:25,073 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., storeName=f4d8f05e8a76a38fda262a0f09617252/info, priority=13, startTime=1733349144997; duration=0sec 2024-12-04T21:52:25,073 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:25,073 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f4d8f05e8a76a38fda262a0f09617252:info 2024-12-04T21:52:25,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:25,693 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-04T21:52:26,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:26,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:26,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:27,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:27,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:27,024 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-04T21:52:27,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/89bdaee656024a0c8ac3695f541421a4 is 1080, key is row0153/info:/1733349145000/Put/seqid=0 2024-12-04T21:52:27,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741865_1041 (size=14672) 2024-12-04T21:52:27,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741865_1041 (size=14672) 2024-12-04T21:52:27,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/89bdaee656024a0c8ac3695f541421a4 2024-12-04T21:52:27,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/89bdaee656024a0c8ac3695f541421a4 as 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/89bdaee656024a0c8ac3695f541421a4 2024-12-04T21:52:27,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/89bdaee656024a0c8ac3695f541421a4, entries=9, sequenceid=216, filesize=14.3 K 2024-12-04T21:52:27,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=10.51 KB/10760 for f4d8f05e8a76a38fda262a0f09617252 in 21ms, sequenceid=216, compaction requested=true 2024-12-04T21:52:27,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:27,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f4d8f05e8a76a38fda262a0f09617252:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T21:52:27,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:27,045 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:52:27,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:27,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-04T21:52:27,046 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T21:52:27,046 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): f4d8f05e8a76a38fda262a0f09617252/info is initiating minor compaction (all files) 2024-12-04T21:52:27,046 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f4d8f05e8a76a38fda262a0f09617252/info in TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 
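Annotation: the "Failed invocation" warnings that recur throughout this log come from util.RecoverLeaseFSUtils(258). The stack traces show RecoverLeaseFSUtils.isFileClosed invoking DistributedFileSystem.isFileClosed through java.lang.reflect.Method, so when the underlying DFSClient has already been shut down the real failure ("java.io.IOException: Filesystem closed") surfaces wrapped in an InvocationTargetException. The minimal sketch below reproduces only that wrapping behaviour with a stand-in class; FakeFileSystem and the sample path are assumptions for illustration and are not HDFS or HBase code.

// Why a reflective call reports InvocationTargetException with the real IOException as its cause.
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveIsFileClosed {

    // Stand-in for a filesystem whose client has already been closed (hypothetical, for illustration).
    public static class FakeFileSystem {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        FakeFileSystem fs = new FakeFileSystem();
        Method isFileClosed = FakeFileSystem.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(fs, "/example/wal-file"); // illustrative path only
        } catch (InvocationTargetException e) {
            // Reflection wraps the thrown IOException; getCause() recovers it, matching the
            // "InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed"
            // pattern in the warnings above.
            System.out.println("wrapped:   " + e);
            System.out.println("unwrapped: " + e.getCause());
        }
    }
}

Judging by the timestamps, the Close-WAL-Writer-0 thread appears to retry roughly once per second, which is why the same three WAL paths keep reappearing in these warnings.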
2024-12-04T21:52:27,046 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/c5db1824f9344636bf5807b9a9ef983e, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/047a0aa0632f47c6bea66b0cef80a413, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/89bdaee656024a0c8ac3695f541421a4] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp, totalSize=120.5 K 2024-12-04T21:52:27,047 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting c5db1824f9344636bf5807b9a9ef983e, keycount=79, bloomtype=ROW, size=88.6 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733349128760 2024-12-04T21:52:27,047 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 047a0aa0632f47c6bea66b0cef80a413, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733349144971 2024-12-04T21:52:27,047 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 89bdaee656024a0c8ac3695f541421a4, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733349145000 2024-12-04T21:52:27,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/2bdcd66d8cb44268a05ee3b8f61ed8b9 is 1080, key is row0162/info:/1733349147026/Put/seqid=0 2024-12-04T21:52:27,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741866_1042 (size=16828) 2024-12-04T21:52:27,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741866_1042 (size=16828) 2024-12-04T21:52:27,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/2bdcd66d8cb44268a05ee3b8f61ed8b9 2024-12-04T21:52:27,061 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f4d8f05e8a76a38fda262a0f09617252#info#compaction#80 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:52:27,062 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/ca9b4fa8fd1849eaa5c00c946d0958bd is 1080, key is row0062/info:/1733349128760/Put/seqid=0 2024-12-04T21:52:27,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/2bdcd66d8cb44268a05ee3b8f61ed8b9 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/2bdcd66d8cb44268a05ee3b8f61ed8b9 2024-12-04T21:52:27,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741867_1043 (size=113509) 2024-12-04T21:52:27,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741867_1043 (size=113509) 2024-12-04T21:52:27,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/2bdcd66d8cb44268a05ee3b8f61ed8b9, entries=11, sequenceid=230, filesize=16.4 K 2024-12-04T21:52:27,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for f4d8f05e8a76a38fda262a0f09617252 in 26ms, sequenceid=230, compaction requested=false 2024-12-04T21:52:27,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:27,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:27,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-04T21:52:27,073 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/ca9b4fa8fd1849eaa5c00c946d0958bd as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ca9b4fa8fd1849eaa5c00c946d0958bd 2024-12-04T21:52:27,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/15961a30cf5249ec84d82ce308a5060b is 1080, key is row0173/info:/1733349147047/Put/seqid=0 2024-12-04T21:52:27,096 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f4d8f05e8a76a38fda262a0f09617252/info of f4d8f05e8a76a38fda262a0f09617252 into 
ca9b4fa8fd1849eaa5c00c946d0958bd(size=110.8 K), total size for store is 127.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T21:52:27,096 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:27,096 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., storeName=f4d8f05e8a76a38fda262a0f09617252/info, priority=13, startTime=1733349147045; duration=0sec 2024-12-04T21:52:27,096 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:27,096 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f4d8f05e8a76a38fda262a0f09617252:info 2024-12-04T21:52:27,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741868_1044 (size=16828) 2024-12-04T21:52:27,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741868_1044 (size=16828) 2024-12-04T21:52:27,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/15961a30cf5249ec84d82ce308a5060b 2024-12-04T21:52:27,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/15961a30cf5249ec84d82ce308a5060b as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/15961a30cf5249ec84d82ce308a5060b 2024-12-04T21:52:27,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/15961a30cf5249ec84d82ce308a5060b, entries=11, sequenceid=244, filesize=16.4 K 2024-12-04T21:52:27,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for f4d8f05e8a76a38fda262a0f09617252 in 41ms, sequenceid=244, compaction requested=true 2024-12-04T21:52:27,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:27,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f4d8f05e8a76a38fda262a0f09617252:info, priority=-2147483648, current under compaction store size is 1 2024-12-04T21:52:27,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:27,113 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-04T21:52:27,114 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 147165 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-04T21:52:27,115 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): f4d8f05e8a76a38fda262a0f09617252/info is initiating minor compaction (all files) 2024-12-04T21:52:27,115 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f4d8f05e8a76a38fda262a0f09617252/info in TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 2024-12-04T21:52:27,115 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ca9b4fa8fd1849eaa5c00c946d0958bd, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/2bdcd66d8cb44268a05ee3b8f61ed8b9, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/15961a30cf5249ec84d82ce308a5060b] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp, totalSize=143.7 K 2024-12-04T21:52:27,115 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting ca9b4fa8fd1849eaa5c00c946d0958bd, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733349128760 2024-12-04T21:52:27,116 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2bdcd66d8cb44268a05ee3b8f61ed8b9, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1733349147026 2024-12-04T21:52:27,116 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 15961a30cf5249ec84d82ce308a5060b, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733349147047 2024-12-04T21:52:27,126 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f4d8f05e8a76a38fda262a0f09617252#info#compaction#82 average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-04T21:52:27,127 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/93e0df9a561e418daa21523cda37de76 is 1080, key is row0062/info:/1733349128760/Put/seqid=0 2024-12-04T21:52:27,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741869_1045 (size=137463) 2024-12-04T21:52:27,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741869_1045 (size=137463) 2024-12-04T21:52:27,136 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/93e0df9a561e418daa21523cda37de76 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/93e0df9a561e418daa21523cda37de76 2024-12-04T21:52:27,142 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f4d8f05e8a76a38fda262a0f09617252/info of f4d8f05e8a76a38fda262a0f09617252 into 93e0df9a561e418daa21523cda37de76(size=134.2 K), total size for store is 134.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-04T21:52:27,142 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:27,142 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., storeName=f4d8f05e8a76a38fda262a0f09617252/info, priority=13, startTime=1733349147113; duration=0sec 2024-12-04T21:52:27,142 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-04T21:52:27,142 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f4d8f05e8a76a38fda262a0f09617252:info 2024-12-04T21:52:27,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T21:52:28,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
2024-12-04T21:52:28,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
2024-12-04T21:52:28,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193
2024-12-04T21:52:29,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
2024-12-04T21:52:29,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
2024-12-04T21:52:29,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252
2024-12-04T21:52:29,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-12-04T21:52:29,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/560a535129f045cb8370a7907644a31a is 1080, key is row0184/info:/1733349147074/Put/seqid=0
2024-12-04T21:52:29,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741870_1046 (size=15751)
2024-12-04T21:52:29,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741870_1046 (size=15751)
2024-12-04T21:52:29,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/560a535129f045cb8370a7907644a31a
2024-12-04T21:52:29,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/560a535129f045cb8370a7907644a31a as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/560a535129f045cb8370a7907644a31a
2024-12-04T21:52:29,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/560a535129f045cb8370a7907644a31a, entries=10, sequenceid=259, filesize=15.4 K
2024-12-04T21:52:29,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for f4d8f05e8a76a38fda262a0f09617252 in 28ms, sequenceid=259, compaction requested=false
2024-12-04T21:52:29,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:29,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252
2024-12-04T21:52:29,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-12-04T21:52:29,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/824d18f64d9640989f17c56dfce3a42f is 1080, key is row0194/info:/1733349149117/Put/seqid=0
2024-12-04T21:52:29,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741871_1047 (size=17918)
2024-12-04T21:52:29,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741871_1047 (size=17918)
2024-12-04T21:52:29,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/824d18f64d9640989f17c56dfce3a42f
2024-12-04T21:52:29,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/824d18f64d9640989f17c56dfce3a42f as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/824d18f64d9640989f17c56dfce3a42f
2024-12-04T21:52:29,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/824d18f64d9640989f17c56dfce3a42f, entries=12, sequenceid=274, filesize=17.5 K
2024-12-04T21:52:29,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for f4d8f05e8a76a38fda262a0f09617252 in 25ms, sequenceid=274, compaction requested=true
2024-12-04T21:52:29,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:29,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f4d8f05e8a76a38fda262a0f09617252:info, priority=-2147483648, current under compaction store size is 1
2024-12-04T21:52:29,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T21:52:29,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252
2024-12-04T21:52:29,170 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-04T21:52:29,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-12-04T21:52:29,171 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 171132 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-04T21:52:29,172 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): f4d8f05e8a76a38fda262a0f09617252/info is initiating minor compaction (all files)
2024-12-04T21:52:29,172 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f4d8f05e8a76a38fda262a0f09617252/info in TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.
2024-12-04T21:52:29,172 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/93e0df9a561e418daa21523cda37de76, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/560a535129f045cb8370a7907644a31a, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/824d18f64d9640989f17c56dfce3a42f] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp, totalSize=167.1 K
2024-12-04T21:52:29,172 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 93e0df9a561e418daa21523cda37de76, keycount=122, bloomtype=ROW, size=134.2 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733349128760
2024-12-04T21:52:29,173 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 560a535129f045cb8370a7907644a31a, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733349147074
2024-12-04T21:52:29,173 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 824d18f64d9640989f17c56dfce3a42f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733349149117
2024-12-04T21:52:29,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/619a5e06b01349dabab4891ab853d744 is 1080, key is row0206/info:/1733349149146/Put/seqid=0
2024-12-04T21:52:29,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741872_1048 (size=16839)
2024-12-04T21:52:29,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741872_1048 (size=16839)
2024-12-04T21:52:29,185 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f4d8f05e8a76a38fda262a0f09617252#info#compaction#86 average throughput is 49.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-04T21:52:29,185 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/f6e06f55b1824106bb646513ef587e30 is 1080, key is row0062/info:/1733349128760/Put/seqid=0
2024-12-04T21:52:29,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741873_1049 (size=161367)
2024-12-04T21:52:29,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741873_1049 (size=161367)
2024-12-04T21:52:29,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T21:52:29,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/619a5e06b01349dabab4891ab853d744
2024-12-04T21:52:29,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/619a5e06b01349dabab4891ab853d744 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/619a5e06b01349dabab4891ab853d744
2024-12-04T21:52:29,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/619a5e06b01349dabab4891ab853d744, entries=11, sequenceid=288, filesize=16.4 K
2024-12-04T21:52:29,596 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/f6e06f55b1824106bb646513ef587e30 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f6e06f55b1824106bb646513ef587e30
2024-12-04T21:52:29,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for f4d8f05e8a76a38fda262a0f09617252 in 426ms, sequenceid=288, compaction requested=false
2024-12-04T21:52:29,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:29,603 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f4d8f05e8a76a38fda262a0f09617252/info of f4d8f05e8a76a38fda262a0f09617252 into f6e06f55b1824106bb646513ef587e30(size=157.6 K), total size for store is 174.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-04T21:52:29,603 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:29,603 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., storeName=f4d8f05e8a76a38fda262a0f09617252/info, priority=13, startTime=1733349149170; duration=0sec
2024-12-04T21:52:29,603 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T21:52:29,603 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f4d8f05e8a76a38fda262a0f09617252:info
2024-12-04T21:52:30,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T21:52:30,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
2024-12-04T21:52:30,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193
2024-12-04T21:52:31,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
2024-12-04T21:52:31,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
2024-12-04T21:52:31,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252
2024-12-04T21:52:31,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-12-04T21:52:31,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/3e8e8471e7864a91b3c473ec91d75d89 is 1080, key is row0217/info:/1733349149172/Put/seqid=0
2024-12-04T21:52:31,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741874_1050 (size=14681)
2024-12-04T21:52:31,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741874_1050 (size=14681)
2024-12-04T21:52:31,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/3e8e8471e7864a91b3c473ec91d75d89
2024-12-04T21:52:31,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/3e8e8471e7864a91b3c473ec91d75d89 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/3e8e8471e7864a91b3c473ec91d75d89
2024-12-04T21:52:31,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/3e8e8471e7864a91b3c473ec91d75d89, entries=9, sequenceid=301, filesize=14.3 K
2024-12-04T21:52:31,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=12.61 KB/12912 for f4d8f05e8a76a38fda262a0f09617252 in 38ms, sequenceid=301, compaction requested=true
2024-12-04T21:52:31,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:31,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f4d8f05e8a76a38fda262a0f09617252:info, priority=-2147483648, current under compaction store size is 1
2024-12-04T21:52:31,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T21:52:31,230 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-04T21:52:31,231 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192887 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-04T21:52:31,232 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): f4d8f05e8a76a38fda262a0f09617252/info is initiating minor compaction (all files)
2024-12-04T21:52:31,232 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f4d8f05e8a76a38fda262a0f09617252/info in TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.
2024-12-04T21:52:31,232 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f6e06f55b1824106bb646513ef587e30, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/619a5e06b01349dabab4891ab853d744, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/3e8e8471e7864a91b3c473ec91d75d89] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp, totalSize=188.4 K
2024-12-04T21:52:31,232 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6e06f55b1824106bb646513ef587e30, keycount=144, bloomtype=ROW, size=157.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733349128760
2024-12-04T21:52:31,233 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 619a5e06b01349dabab4891ab853d744, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733349149146
2024-12-04T21:52:31,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252
2024-12-04T21:52:31,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-12-04T21:52:31,233 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3e8e8471e7864a91b3c473ec91d75d89, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733349149172
2024-12-04T21:52:31,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/0bfba581673a4eb1b1e567287873ec26 is 1080, key is row0226/info:/1733349151192/Put/seqid=0
2024-12-04T21:52:31,251 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f4d8f05e8a76a38fda262a0f09617252#info#compaction#89 average throughput is 56.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-04T21:52:31,252 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/f04afd83472c41889742a4ae579c9774 is 1080, key is row0062/info:/1733349128760/Put/seqid=0
2024-12-04T21:52:31,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741875_1051 (size=20092)
2024-12-04T21:52:31,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741875_1051 (size=20092)
2024-12-04T21:52:31,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/0bfba581673a4eb1b1e567287873ec26
2024-12-04T21:52:31,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/0bfba581673a4eb1b1e567287873ec26 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/0bfba581673a4eb1b1e567287873ec26
2024-12-04T21:52:31,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741876_1052 (size=183053)
2024-12-04T21:52:31,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741876_1052 (size=183053)
2024-12-04T21:52:31,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/0bfba581673a4eb1b1e567287873ec26, entries=14, sequenceid=318, filesize=19.6 K
2024-12-04T21:52:31,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for f4d8f05e8a76a38fda262a0f09617252 in 42ms, sequenceid=318, compaction requested=false
2024-12-04T21:52:31,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:31,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46213 {}] regionserver.HRegion(8855): Flush requested on f4d8f05e8a76a38fda262a0f09617252
2024-12-04T21:52:31,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-12-04T21:52:31,280 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/f04afd83472c41889742a4ae579c9774 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f04afd83472c41889742a4ae579c9774
2024-12-04T21:52:31,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/46a35270695c4abeb47e15d8b4b28783 is 1080, key is row0240/info:/1733349151236/Put/seqid=0
2024-12-04T21:52:31,288 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f4d8f05e8a76a38fda262a0f09617252/info of f4d8f05e8a76a38fda262a0f09617252 into f04afd83472c41889742a4ae579c9774(size=178.8 K), total size for store is 198.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-04T21:52:31,288 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:31,288 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., storeName=f4d8f05e8a76a38fda262a0f09617252/info, priority=13, startTime=1733349151230; duration=0sec
2024-12-04T21:52:31,288 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T21:52:31,288 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f4d8f05e8a76a38fda262a0f09617252:info
2024-12-04T21:52:31,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741877_1053 (size=19013)
2024-12-04T21:52:31,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741877_1053 (size=19013)
2024-12-04T21:52:31,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/46a35270695c4abeb47e15d8b4b28783
2024-12-04T21:52:31,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/46a35270695c4abeb47e15d8b4b28783 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/46a35270695c4abeb47e15d8b4b28783
2024-12-04T21:52:31,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/46a35270695c4abeb47e15d8b4b28783, entries=13, sequenceid=334, filesize=18.6 K
2024-12-04T21:52:31,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for f4d8f05e8a76a38fda262a0f09617252 in 45ms, sequenceid=334, compaction requested=true
2024-12-04T21:52:31,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:31,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f4d8f05e8a76a38fda262a0f09617252:info, priority=-2147483648, current under compaction store size is 1
2024-12-04T21:52:31,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T21:52:31,322 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-04T21:52:31,324 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 222158 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-04T21:52:31,324 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1541): f4d8f05e8a76a38fda262a0f09617252/info is initiating minor compaction (all files)
2024-12-04T21:52:31,324 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f4d8f05e8a76a38fda262a0f09617252/info in TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.
2024-12-04T21:52:31,324 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f04afd83472c41889742a4ae579c9774, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/0bfba581673a4eb1b1e567287873ec26, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/46a35270695c4abeb47e15d8b4b28783] into tmpdir=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp, totalSize=217.0 K
2024-12-04T21:52:31,325 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting f04afd83472c41889742a4ae579c9774, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733349128760
2024-12-04T21:52:31,326 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0bfba581673a4eb1b1e567287873ec26, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733349151192
2024-12-04T21:52:31,326 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] compactions.Compactor(225): Compacting 46a35270695c4abeb47e15d8b4b28783, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733349151236
2024-12-04T21:52:31,344 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f4d8f05e8a76a38fda262a0f09617252#info#compaction#91 average throughput is 39.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-04T21:52:31,345 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/e050bd7c83454e7d95ffe089e88d7a44 is 1080, key is row0062/info:/1733349128760/Put/seqid=0
2024-12-04T21:52:31,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741878_1054 (size=212365)
2024-12-04T21:52:31,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741878_1054 (size=212365)
2024-12-04T21:52:31,367 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/e050bd7c83454e7d95ffe089e88d7a44 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/e050bd7c83454e7d95ffe089e88d7a44
2024-12-04T21:52:31,379 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f4d8f05e8a76a38fda262a0f09617252/info of f4d8f05e8a76a38fda262a0f09617252 into e050bd7c83454e7d95ffe089e88d7a44(size=207.4 K), total size for store is 207.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-04T21:52:31,379 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f4d8f05e8a76a38fda262a0f09617252:
2024-12-04T21:52:31,379 INFO [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252., storeName=f4d8f05e8a76a38fda262a0f09617252/info, priority=13, startTime=1733349151322; duration=0sec
2024-12-04T21:52:31,379 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-04T21:52:31,379 DEBUG [RS:0;bb3046a53f79:46213-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f4d8f05e8a76a38fda262a0f09617252:info
2024-12-04T21:52:31,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-04T21:52:32,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243
2024-12-04T21:52:32,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
2024-12-04T21:52:32,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193
2024-12-04T21:52:33,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:33,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-04T21:52:33,290 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-04T21:52:33,291 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C46213%2C1733349115759.1733349153291 2024-12-04T21:52:33,299 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,299 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,299 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,300 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,300 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,300 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.1733349116136 with entries=318, filesize=310.38 KB; new WAL /user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.1733349153291 2024-12-04T21:52:33,301 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:33067:33067)] 2024-12-04T21:52:33,301 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.1733349116136 is not closed yet, will try archiving it next time 2024-12-04T21:52:33,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741833_1009 (size=317837) 2024-12-04T21:52:33,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741833_1009 (size=317837) 2024-12-04T21:52:33,305 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 46be0065dc8998c42a2f9522ae13b7f5: 2024-12-04T21:52:33,305 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-12-04T21:52:33,309 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/info/303ef9b7ae0c470ab6d8e41698d7e702 is 193, key is TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252./info:regioninfo/1733349131566/Put/seqid=0 2024-12-04T21:52:33,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741880_1056 (size=6223) 2024-12-04T21:52:33,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741880_1056 (size=6223) 2024-12-04T21:52:33,314 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/info/303ef9b7ae0c470ab6d8e41698d7e702 2024-12-04T21:52:33,319 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/.tmp/info/303ef9b7ae0c470ab6d8e41698d7e702 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/info/303ef9b7ae0c470ab6d8e41698d7e702 2024-12-04T21:52:33,323 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/info/303ef9b7ae0c470ab6d8e41698d7e702, entries=5, sequenceid=21, filesize=6.1 K 2024-12-04T21:52:33,324 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-12-04T21:52:33,324 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-04T21:52:33,324 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f4d8f05e8a76a38fda262a0f09617252 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-04T21:52:33,330 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/0f25caba5ce04addb16f44979e69b4d8 is 1080, key is row0253/info:/1733349151280/Put/seqid=0 2024-12-04T21:52:33,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741881_1057 (size=9278) 2024-12-04T21:52:33,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741881_1057 (size=9278) 2024-12-04T21:52:33,334 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/0f25caba5ce04addb16f44979e69b4d8 2024-12-04T21:52:33,339 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/.tmp/info/0f25caba5ce04addb16f44979e69b4d8 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/0f25caba5ce04addb16f44979e69b4d8 2024-12-04T21:52:33,344 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/0f25caba5ce04addb16f44979e69b4d8, entries=4, sequenceid=343, filesize=9.1 K 2024-12-04T21:52:33,345 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for f4d8f05e8a76a38fda262a0f09617252 in 21ms, sequenceid=343, compaction requested=false 2024-12-04T21:52:33,345 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f4d8f05e8a76a38fda262a0f09617252: 2024-12-04T21:52:33,345 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
bb3046a53f79%2C46213%2C1733349115759.1733349153345 2024-12-04T21:52:33,350 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,351 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,351 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,351 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,351 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,351 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.1733349153291 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.1733349153345 2024-12-04T21:52:33,352 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37687:37687),(127.0.0.1/127.0.0.1:33067:33067)] 2024-12-04T21:52:33,352 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.1733349153291 is not closed yet, will try archiving it next time 2024-12-04T21:52:33,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741879_1055 (size=731) 2024-12-04T21:52:33,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741879_1055 (size=731) 2024-12-04T21:52:33,353 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-04T21:52:33,353 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.1733349116136 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/oldWALs/bb3046a53f79%2C46213%2C1733349115759.1733349116136 2024-12-04T21:52:33,354 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/WALs/bb3046a53f79,46213,1733349115759/bb3046a53f79%2C46213%2C1733349115759.1733349153291 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/oldWALs/bb3046a53f79%2C46213%2C1733349115759.1733349153291 2024-12-04T21:52:33,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:33,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T21:52:33,453 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-04T21:52:33,453 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:52:33,454 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:33,454 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:33,454 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-04T21:52:33,454 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T21:52:33,454 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1265953606, stopped=false 2024-12-04T21:52:33,454 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bb3046a53f79,45219,1733349115717 2024-12-04T21:52:33,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:52:33,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:52:33,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:33,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:33,456 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:52:33,457 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T21:52:33,457 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:52:33,457 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:33,458 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bb3046a53f79,46213,1733349115759' ***** 2024-12-04T21:52:33,458 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T21:52:33,458 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:52:33,458 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:52:33,459 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T21:52:33,459 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T21:52:33,459 INFO [RS:0;bb3046a53f79:46213 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T21:52:33,460 INFO [RS:0;bb3046a53f79:46213 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-04T21:52:33,460 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(3091): Received CLOSE for 46be0065dc8998c42a2f9522ae13b7f5 2024-12-04T21:52:33,460 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(3091): Received CLOSE for f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:33,460 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(959): stopping server bb3046a53f79,46213,1733349115759 2024-12-04T21:52:33,460 INFO [RS:0;bb3046a53f79:46213 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:52:33,460 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 46be0065dc8998c42a2f9522ae13b7f5, disabling compactions & flushes 2024-12-04T21:52:33,460 INFO [RS:0;bb3046a53f79:46213 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bb3046a53f79:46213. 2024-12-04T21:52:33,460 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 2024-12-04T21:52:33,460 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 2024-12-04T21:52:33,461 DEBUG [RS:0;bb3046a53f79:46213 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:52:33,461 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. after waiting 0 ms 2024-12-04T21:52:33,461 DEBUG [RS:0;bb3046a53f79:46213 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:33,461 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 2024-12-04T21:52:33,461 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-04T21:52:33,461 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T21:52:33,461 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-04T21:52:33,461 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T21:52:33,461 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-04T21:52:33,461 DEBUG [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(1325): Online Regions={46be0065dc8998c42a2f9522ae13b7f5=TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5., 1588230740=hbase:meta,,1.1588230740, f4d8f05e8a76a38fda262a0f09617252=TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.} 2024-12-04T21:52:33,461 DEBUG [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 46be0065dc8998c42a2f9522ae13b7f5, f4d8f05e8a76a38fda262a0f09617252 2024-12-04T21:52:33,462 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:52:33,462 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:52:33,462 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:52:33,462 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:52:33,462 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:52:33,461 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030->hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ec5c97d16d3941f6bb8003badf88ea7b-bottom] to archive 2024-12-04T21:52:33,463 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T21:52:33,465 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:33,465 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=bb3046a53f79:45219 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-04T21:52:33,465 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-04T21:52:33,466 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-04T21:52:33,467 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:52:33,467 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:52:33,467 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733349153461Running coprocessor pre-close hooks at 1733349153461Disabling compacts and flushes for region at 1733349153461Disabling writes for close at 1733349153462 (+1 ms)Writing region close event to WAL at 1733349153463 (+1 ms)Running coprocessor post-close hooks at 1733349153467 (+4 ms)Closed at 1733349153467 2024-12-04T21:52:33,467 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T21:52:33,469 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/46be0065dc8998c42a2f9522ae13b7f5/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-12-04T21:52:33,469 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 
2024-12-04T21:52:33,469 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 46be0065dc8998c42a2f9522ae13b7f5: Waiting for close lock at 1733349153460Running coprocessor pre-close hooks at 1733349153460Disabling compacts and flushes for region at 1733349153460Disabling writes for close at 1733349153461 (+1 ms)Writing region close event to WAL at 1733349153466 (+5 ms)Running coprocessor post-close hooks at 1733349153469 (+3 ms)Closed at 1733349153469 2024-12-04T21:52:33,469 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733349130854.46be0065dc8998c42a2f9522ae13b7f5. 2024-12-04T21:52:33,469 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f4d8f05e8a76a38fda262a0f09617252, disabling compactions & flushes 2024-12-04T21:52:33,469 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 2024-12-04T21:52:33,469 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 2024-12-04T21:52:33,470 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. after waiting 0 ms 2024-12-04T21:52:33,470 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 
2024-12-04T21:52:33,470 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030->hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/4185395759ca66fa95c987c9f7e36030/info/ec5c97d16d3941f6bb8003badf88ea7b-top, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-22fe535c03fb4fbcb4a1f8d63b0b7678, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/578df3b2c2804f47ae8393cb8def757d, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-a6a5b130fb4a4ad987dc58747f561d52, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53789a08f54c408488759e4ff35163c0, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53e786a33828420fa9825aa12bcfef03, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/20fb6bf04a1d48d39ef171cf9bc37bbc, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/a76ab38637c44bf5a4dc6b72f4e53dee, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/c5db1824f9344636bf5807b9a9ef983e, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/76c49853e44c4377b95fa18541c0943e, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/047a0aa0632f47c6bea66b0cef80a413, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ca9b4fa8fd1849eaa5c00c946d0958bd, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/89bdaee656024a0c8ac3695f541421a4, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/2bdcd66d8cb44268a05ee3b8f61ed8b9, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/93e0df9a561e418daa21523cda37de76, 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/15961a30cf5249ec84d82ce308a5060b, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/560a535129f045cb8370a7907644a31a, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f6e06f55b1824106bb646513ef587e30, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/824d18f64d9640989f17c56dfce3a42f, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/619a5e06b01349dabab4891ab853d744, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f04afd83472c41889742a4ae579c9774, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/3e8e8471e7864a91b3c473ec91d75d89, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/0bfba581673a4eb1b1e567287873ec26, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/46a35270695c4abeb47e15d8b4b28783] to archive 2024-12-04T21:52:33,471 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-04T21:52:33,473 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ec5c97d16d3941f6bb8003badf88ea7b.4185395759ca66fa95c987c9f7e36030 2024-12-04T21:52:33,474 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-22fe535c03fb4fbcb4a1f8d63b0b7678 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-22fe535c03fb4fbcb4a1f8d63b0b7678 2024-12-04T21:52:33,475 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/578df3b2c2804f47ae8393cb8def757d to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/578df3b2c2804f47ae8393cb8def757d 2024-12-04T21:52:33,476 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-a6a5b130fb4a4ad987dc58747f561d52 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/TestLogRolling-testLogRolling=4185395759ca66fa95c987c9f7e36030-a6a5b130fb4a4ad987dc58747f561d52 2024-12-04T21:52:33,478 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53789a08f54c408488759e4ff35163c0 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53789a08f54c408488759e4ff35163c0 2024-12-04T21:52:33,479 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53e786a33828420fa9825aa12bcfef03 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/53e786a33828420fa9825aa12bcfef03 2024-12-04T21:52:33,480 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/20fb6bf04a1d48d39ef171cf9bc37bbc to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/20fb6bf04a1d48d39ef171cf9bc37bbc 2024-12-04T21:52:33,481 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/a76ab38637c44bf5a4dc6b72f4e53dee to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/a76ab38637c44bf5a4dc6b72f4e53dee 2024-12-04T21:52:33,482 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/c5db1824f9344636bf5807b9a9ef983e to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/c5db1824f9344636bf5807b9a9ef983e 2024-12-04T21:52:33,483 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/76c49853e44c4377b95fa18541c0943e to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/76c49853e44c4377b95fa18541c0943e 2024-12-04T21:52:33,484 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/047a0aa0632f47c6bea66b0cef80a413 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/047a0aa0632f47c6bea66b0cef80a413 2024-12-04T21:52:33,485 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ca9b4fa8fd1849eaa5c00c946d0958bd to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/ca9b4fa8fd1849eaa5c00c946d0958bd 2024-12-04T21:52:33,486 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/89bdaee656024a0c8ac3695f541421a4 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/89bdaee656024a0c8ac3695f541421a4 2024-12-04T21:52:33,487 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/2bdcd66d8cb44268a05ee3b8f61ed8b9 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/2bdcd66d8cb44268a05ee3b8f61ed8b9 2024-12-04T21:52:33,488 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/93e0df9a561e418daa21523cda37de76 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/93e0df9a561e418daa21523cda37de76 2024-12-04T21:52:33,489 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/15961a30cf5249ec84d82ce308a5060b to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/15961a30cf5249ec84d82ce308a5060b 2024-12-04T21:52:33,490 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/560a535129f045cb8370a7907644a31a to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/560a535129f045cb8370a7907644a31a 2024-12-04T21:52:33,491 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f6e06f55b1824106bb646513ef587e30 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f6e06f55b1824106bb646513ef587e30 2024-12-04T21:52:33,491 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/824d18f64d9640989f17c56dfce3a42f to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/824d18f64d9640989f17c56dfce3a42f 2024-12-04T21:52:33,492 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/619a5e06b01349dabab4891ab853d744 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/619a5e06b01349dabab4891ab853d744 2024-12-04T21:52:33,493 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f04afd83472c41889742a4ae579c9774 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/f04afd83472c41889742a4ae579c9774 2024-12-04T21:52:33,494 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/3e8e8471e7864a91b3c473ec91d75d89 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/3e8e8471e7864a91b3c473ec91d75d89 2024-12-04T21:52:33,495 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/0bfba581673a4eb1b1e567287873ec26 to 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/0bfba581673a4eb1b1e567287873ec26 2024-12-04T21:52:33,496 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/46a35270695c4abeb47e15d8b4b28783 to hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/archive/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/info/46a35270695c4abeb47e15d8b4b28783 2024-12-04T21:52:33,496 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [578df3b2c2804f47ae8393cb8def757d=42984, 53789a08f54c408488759e4ff35163c0=12516, 53e786a33828420fa9825aa12bcfef03=62558, 20fb6bf04a1d48d39ef171cf9bc37bbc=16828, a76ab38637c44bf5a4dc6b72f4e53dee=21156, c5db1824f9344636bf5807b9a9ef983e=90765, 76c49853e44c4377b95fa18541c0943e=16828, 047a0aa0632f47c6bea66b0cef80a413=17906, ca9b4fa8fd1849eaa5c00c946d0958bd=113509, 89bdaee656024a0c8ac3695f541421a4=14672, 2bdcd66d8cb44268a05ee3b8f61ed8b9=16828, 93e0df9a561e418daa21523cda37de76=137463, 15961a30cf5249ec84d82ce308a5060b=16828, 560a535129f045cb8370a7907644a31a=15751, f6e06f55b1824106bb646513ef587e30=161367, 824d18f64d9640989f17c56dfce3a42f=17918, 619a5e06b01349dabab4891ab853d744=16839, f04afd83472c41889742a4ae579c9774=183053, 3e8e8471e7864a91b3c473ec91d75d89=14681, 0bfba581673a4eb1b1e567287873ec26=20092, 46a35270695c4abeb47e15d8b4b28783=19013] 2024-12-04T21:52:33,500 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/data/default/TestLogRolling-testLogRolling/f4d8f05e8a76a38fda262a0f09617252/recovered.edits/346.seqid, newMaxSeqId=346, maxSeqId=130 2024-12-04T21:52:33,500 INFO [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 2024-12-04T21:52:33,500 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f4d8f05e8a76a38fda262a0f09617252: Waiting for close lock at 1733349153469Running coprocessor pre-close hooks at 1733349153469Disabling compacts and flushes for region at 1733349153469Disabling writes for close at 1733349153470 (+1 ms)Writing region close event to WAL at 1733349153496 (+26 ms)Running coprocessor post-close hooks at 1733349153500 (+4 ms)Closed at 1733349153500 2024-12-04T21:52:33,500 DEBUG [RS_CLOSE_REGION-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733349130854.f4d8f05e8a76a38fda262a0f09617252. 2024-12-04T21:52:33,662 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(976): stopping server bb3046a53f79,46213,1733349115759; all regions closed. 
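The HFileArchiver(596) entries above move each store file from the region's data/ directory to the same relative path under archive/, preserving the namespace/table/region/family layout. A minimal plain-Java sketch of that path mapping, using the root directory shown in the log; the class and method names are illustrative and this is not the HBase HFileArchiver API:

// Sketch: derive the archive location of a store file from its data location,
// assuming the "<root>/data/<namespace>/<table>/<region>/<family>/<file>" layout
// visible in the HFileArchiver(596) entries. Illustrative only; not the HBase API.
public final class ArchivePathSketch {
    static String toArchivePath(String rootDir, String storeFilePath) {
        String dataPrefix = rootDir + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("not under " + dataPrefix + ": " + storeFilePath);
        }
        // Keep the namespace/table/region/family/file suffix, swap data/ for archive/.
        return rootDir + "/archive/" + storeFilePath.substring(dataPrefix.length());
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362";
        String src = root + "/data/default/TestLogRolling-testLogRolling/"
                + "f4d8f05e8a76a38fda262a0f09617252/info/578df3b2c2804f47ae8393cb8def757d";
        // Prints the same archive path the corresponding HFileArchiver(596) entry reports.
        System.out.println(toArchivePath(root, src));
    }
}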
2024-12-04T21:52:33,662 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,662 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,662 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,662 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,662 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741834_1010 (size=8107) 2024-12-04T21:52:33,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741834_1010 (size=8107) 2024-12-04T21:52:33,666 DEBUG [RS:0;bb3046a53f79:46213 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/oldWALs 2024-12-04T21:52:33,666 INFO [RS:0;bb3046a53f79:46213 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C46213%2C1733349115759.meta:.meta(num 1733349116498) 2024-12-04T21:52:33,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,667 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,667 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,667 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,667 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741882_1058 (size=780) 2024-12-04T21:52:33,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741882_1058 (size=780) 2024-12-04T21:52:33,670 DEBUG [RS:0;bb3046a53f79:46213 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/oldWALs 2024-12-04T21:52:33,670 INFO [RS:0;bb3046a53f79:46213 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C46213%2C1733349115759:(num 1733349153345) 2024-12-04T21:52:33,670 DEBUG [RS:0;bb3046a53f79:46213 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:33,670 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:52:33,670 INFO [RS:0;bb3046a53f79:46213 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:52:33,670 INFO [RS:0;bb3046a53f79:46213 {}] hbase.ChoreService(370): Chore service for: regionserver/bb3046a53f79:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-04T21:52:33,671 INFO [RS:0;bb3046a53f79:46213 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:52:33,671 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
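The "Closed WAL: FSHLog bb3046a53f79%2C46213%2C1733349115759..." entries above carry the server name in the WAL prefix with its commas URL-encoded, which is why the same server shows up as bb3046a53f79,46213,1733349115759 elsewhere in the log. A small JDK-only sketch decoding that prefix; it illustrates the naming seen here and is not an HBase call:

import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;

// Sketch: decode the WAL prefix printed in the "Closed WAL" entries above into
// host, port and start code. Illustrative only.
public final class WalNameSketch {
    public static void main(String[] args) {
        String prefix = "bb3046a53f79%2C46213%2C1733349115759";
        String serverName = URLDecoder.decode(prefix, StandardCharsets.UTF_8);
        String[] parts = serverName.split(",");
        // host=bb3046a53f79 port=46213 startcode=1733349115759, matching the
        // "stopping server bb3046a53f79,46213,1733349115759" entry above.
        System.out.println("host=" + parts[0] + " port=" + parts[1] + " startcode=" + parts[2]);
    }
}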
2024-12-04T21:52:33,671 INFO [RS:0;bb3046a53f79:46213 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46213 2024-12-04T21:52:33,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bb3046a53f79,46213,1733349115759 2024-12-04T21:52:33,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:52:33,672 INFO [RS:0;bb3046a53f79:46213 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:52:33,672 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bb3046a53f79,46213,1733349115759] 2024-12-04T21:52:33,673 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bb3046a53f79,46213,1733349115759 already deleted, retry=false 2024-12-04T21:52:33,673 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bb3046a53f79,46213,1733349115759 expired; onlineServers=0 2024-12-04T21:52:33,673 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bb3046a53f79,45219,1733349115717' ***** 2024-12-04T21:52:33,673 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-04T21:52:33,673 INFO [M:0;bb3046a53f79:45219 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:52:33,673 INFO [M:0;bb3046a53f79:45219 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:52:33,673 DEBUG [M:0;bb3046a53f79:45219 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-04T21:52:33,673 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
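The ZKWatcher(609) entries above show the master being notified that the region server's ephemeral znode under /hbase/rs was deleted, after which RegionServerTracker processes the expiration. As a rough illustration of that mechanism (not HBase's ZKWatcher or RegionServerTracker code), a bare ZooKeeper-client watch on the same path, using the quorum address from the log; the class name, session timeout and sleep are assumptions:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch: set a one-shot watch on the region server's znode and react to the
// NodeDeleted event the log reports. Illustrative only.
public final class RsWatchSketch {
    public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:57789";                                  // quorum from the log
        String rsZnode = "/hbase/rs/bb3046a53f79,46213,1733349115759";      // path from the log
        ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });
        Watcher watcher = (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && rsZnode.equals(event.getPath())) {
                // Roughly what the master logs as "ephemeral node deleted, processing expiration".
                System.out.println("node deleted: " + rsZnode);
            }
        };
        zk.exists(rsZnode, watcher);    // registers the watch (fires at most once)
        Thread.sleep(60_000);           // keep the client alive long enough to see the event
        zk.close();
    }
}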
2024-12-04T21:52:33,673 DEBUG [M:0;bb3046a53f79:45219 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-04T21:52:33,673 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349115891 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349115891,5,FailOnTimeoutGroup] 2024-12-04T21:52:33,674 INFO [M:0;bb3046a53f79:45219 {}] hbase.ChoreService(370): Chore service for: master/bb3046a53f79:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-04T21:52:33,674 INFO [M:0;bb3046a53f79:45219 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:52:33,674 DEBUG [M:0;bb3046a53f79:45219 {}] master.HMaster(1795): Stopping service threads 2024-12-04T21:52:33,674 INFO [M:0;bb3046a53f79:45219 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-04T21:52:33,674 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349115893 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349115893,5,FailOnTimeoutGroup] 2024-12-04T21:52:33,674 INFO [M:0;bb3046a53f79:45219 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:52:33,674 INFO [M:0;bb3046a53f79:45219 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-04T21:52:33,674 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-04T21:52:33,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-04T21:52:33,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:33,674 DEBUG [M:0;bb3046a53f79:45219 {}] zookeeper.ZKUtil(347): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-04T21:52:33,674 WARN [M:0;bb3046a53f79:45219 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-04T21:52:33,675 INFO [M:0;bb3046a53f79:45219 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/.lastflushedseqids 2024-12-04T21:52:33,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741883_1059 (size=228) 2024-12-04T21:52:33,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741883_1059 (size=228) 2024-12-04T21:52:33,680 INFO [M:0;bb3046a53f79:45219 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-04T21:52:33,680 INFO [M:0;bb3046a53f79:45219 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-04T21:52:33,680 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:52:33,680 INFO [M:0;bb3046a53f79:45219 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:52:33,680 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:52:33,681 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:52:33,681 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:52:33,681 INFO [M:0;bb3046a53f79:45219 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.44 KB heapSize=63.39 KB 2024-12-04T21:52:33,694 DEBUG [M:0;bb3046a53f79:45219 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b91dbfbab8f496ca980670a23cbbef1 is 82, key is hbase:meta,,1/info:regioninfo/1733349116526/Put/seqid=0 2024-12-04T21:52:33,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741884_1060 (size=5672) 2024-12-04T21:52:33,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741884_1060 (size=5672) 2024-12-04T21:52:33,699 INFO [M:0;bb3046a53f79:45219 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b91dbfbab8f496ca980670a23cbbef1 2024-12-04T21:52:33,715 DEBUG [M:0;bb3046a53f79:45219 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/feaf46d8193c4c5891d342906d9e4ef0 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733349116970/Put/seqid=0 2024-12-04T21:52:33,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741885_1061 (size=7091) 2024-12-04T21:52:33,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741885_1061 (size=7091) 2024-12-04T21:52:33,719 INFO [M:0;bb3046a53f79:45219 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.84 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/feaf46d8193c4c5891d342906d9e4ef0 2024-12-04T21:52:33,723 INFO [M:0;bb3046a53f79:45219 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for feaf46d8193c4c5891d342906d9e4ef0 2024-12-04T21:52:33,736 DEBUG [M:0;bb3046a53f79:45219 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4929079c017d44cc94c283204fc10c2f is 69, key is bb3046a53f79,46213,1733349115759/rs:state/1733349115999/Put/seqid=0 2024-12-04T21:52:33,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741886_1062 (size=5156) 2024-12-04T21:52:33,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741886_1062 (size=5156) 2024-12-04T21:52:33,740 INFO [M:0;bb3046a53f79:45219 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4929079c017d44cc94c283204fc10c2f 2024-12-04T21:52:33,755 DEBUG [M:0;bb3046a53f79:45219 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0890fa20cf0144fabea5991605c3e459 is 52, key is load_balancer_on/state:d/1733349116585/Put/seqid=0 2024-12-04T21:52:33,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741887_1063 (size=5056) 2024-12-04T21:52:33,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741887_1063 (size=5056) 2024-12-04T21:52:33,760 INFO [M:0;bb3046a53f79:45219 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0890fa20cf0144fabea5991605c3e459 2024-12-04T21:52:33,764 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b91dbfbab8f496ca980670a23cbbef1 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5b91dbfbab8f496ca980670a23cbbef1 2024-12-04T21:52:33,768 INFO [M:0;bb3046a53f79:45219 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5b91dbfbab8f496ca980670a23cbbef1, entries=8, sequenceid=125, filesize=5.5 K 2024-12-04T21:52:33,769 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/feaf46d8193c4c5891d342906d9e4ef0 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/feaf46d8193c4c5891d342906d9e4ef0 2024-12-04T21:52:33,773 INFO [RS:0;bb3046a53f79:46213 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:52:33,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:52:33,773 INFO [RS:0;bb3046a53f79:46213 {}] regionserver.HRegionServer(1031): Exiting; stopping=bb3046a53f79,46213,1733349115759; zookeeper connection closed. 2024-12-04T21:52:33,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46213-0x100a737e39f0001, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:52:33,773 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7f7e0575 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7f7e0575 2024-12-04T21:52:33,773 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-04T21:52:33,773 INFO [M:0;bb3046a53f79:45219 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for feaf46d8193c4c5891d342906d9e4ef0 2024-12-04T21:52:33,773 INFO [M:0;bb3046a53f79:45219 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/feaf46d8193c4c5891d342906d9e4ef0, entries=13, sequenceid=125, filesize=6.9 K 2024-12-04T21:52:33,774 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4929079c017d44cc94c283204fc10c2f as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4929079c017d44cc94c283204fc10c2f 2024-12-04T21:52:33,778 INFO [M:0;bb3046a53f79:45219 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4929079c017d44cc94c283204fc10c2f, entries=1, sequenceid=125, filesize=5.0 K 2024-12-04T21:52:33,779 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0890fa20cf0144fabea5991605c3e459 as hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0890fa20cf0144fabea5991605c3e459 2024-12-04T21:52:33,783 INFO [M:0;bb3046a53f79:45219 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36245/user/jenkins/test-data/49c4b25c-f7e6-16b2-44bc-765874b79362/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0890fa20cf0144fabea5991605c3e459, entries=1, sequenceid=125, filesize=4.9 K 2024-12-04T21:52:33,784 INFO [M:0;bb3046a53f79:45219 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 103ms, sequenceid=125, compaction requested=false 2024-12-04T21:52:33,785 INFO [M:0;bb3046a53f79:45219 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
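The four DefaultStoreFlusher(81) entries above report per-family flush sizes of 504 B (info), 50.84 KB (proc), 65 B (rs) and 48 B (state), and the HRegion(3140) summary reports ~51.44 KB/52675 bytes in total. Taking the proc figure as 52,058 bytes (an inference from the total, since the log only prints it rounded to 50.84 KB), the per-family sizes add up exactly to the reported total; a tiny sketch of that check:

// Sketch: per-family flush sizes vs. the region-level flush total reported above.
// The proc value (52,058 B) is inferred from the total, not printed in the log.
public final class FlushSizeCheck {
    public static void main(String[] args) {
        long info = 504;        // "Flushed memstore data size=504 B"    -> .tmp/info/...
        long proc = 52_058;     // "Flushed memstore data size=50.84 KB" -> .tmp/proc/... (inferred bytes)
        long rs = 65;           // "Flushed memstore data size=65 B"     -> .tmp/rs/...
        long state = 48;        // "Flushed memstore data size=48 B"     -> .tmp/state/...
        long total = info + proc + rs + state;
        System.out.printf("sum=%d bytes (~%.2f KB); log reports 52675 (~51.44 KB)%n",
                total, total / 1024.0);
    }
}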
2024-12-04T21:52:33,785 DEBUG [M:0;bb3046a53f79:45219 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733349153680Disabling compacts and flushes for region at 1733349153680Disabling writes for close at 1733349153681 (+1 ms)Obtaining lock to block concurrent updates at 1733349153681Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733349153681Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52675, getHeapSize=64848, getOffHeapSize=0, getCellsCount=148 at 1733349153681Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733349153681Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733349153682 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733349153694 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733349153694Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733349153702 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733349153714 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733349153715 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733349153723 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733349153736 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733349153736Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733349153744 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733349153755 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733349153755Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77ef3ace: reopening flushed file at 1733349153763 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6988a769: reopening flushed file at 1733349153768 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7355aade: reopening flushed file at 1733349153773 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bcee8a6: reopening flushed file at 1733349153778 (+5 ms)Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 103ms, sequenceid=125, compaction requested=false at 1733349153784 (+6 ms)Writing region close event to WAL at 1733349153785 (+1 ms)Closed at 1733349153785 2024-12-04T21:52:33,786 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,786 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,786 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,786 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,787 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:33,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38497 is added to blk_1073741830_1006 (size=61344) 2024-12-04T21:52:33,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36595 is added to blk_1073741830_1006 (size=61344) 2024-12-04T21:52:33,791 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
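The "Region close journal" entries above concatenate each close phase with its timestamp and print the gap to the previous phase as "(+N ms)". A small sketch that parses such a journal string and recomputes those deltas, using the shorter journal of the TestLogRolling region close from earlier in this log as input; the regex and class name are illustrative:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch: split a region close journal into (phase, timestamp) pairs and
// recompute the "(+N ms)" deltas printed in the log. Illustrative only.
public final class CloseJournalSketch {
    public static void main(String[] args) {
        String journal = "Waiting for close lock at 1733349153469"
                + "Running coprocessor pre-close hooks at 1733349153469"
                + "Disabling compacts and flushes for region at 1733349153469"
                + "Disabling writes for close at 1733349153470 (+1 ms)"
                + "Writing region close event to WAL at 1733349153496 (+26 ms)"
                + "Running coprocessor post-close hooks at 1733349153500 (+4 ms)"
                + "Closed at 1733349153500";
        Pattern phase = Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+\\d+ ms\\))?");
        Matcher m = phase.matcher(journal);
        long prev = -1;
        while (m.find()) {
            long ts = Long.parseLong(m.group(2));
            long delta = prev < 0 ? 0 : ts - prev;
            System.out.printf("%-45s +%d ms%n", m.group(1), delta);
            prev = ts;
        }
    }
}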
2024-12-04T21:52:33,791 INFO [M:0;bb3046a53f79:45219 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-04T21:52:33,791 INFO [M:0;bb3046a53f79:45219 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45219 2024-12-04T21:52:33,791 INFO [M:0;bb3046a53f79:45219 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-04T21:52:33,892 INFO [M:0;bb3046a53f79:45219 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-04T21:52:33,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:52:33,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45219-0x100a737e39f0000, quorum=127.0.0.1:57789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-04T21:52:33,894 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d113b0b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:52:33,894 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55cf2d5e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:52:33,894 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:52:33,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@de978e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:52:33,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67cee3f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.log.dir/,STOPPED} 2024-12-04T21:52:33,896 WARN [BP-42441024-172.17.0.2-1733349115130 heartbeating to localhost/127.0.0.1:36245 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:52:33,896 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:52:33,896 WARN [BP-42441024-172.17.0.2-1733349115130 heartbeating to localhost/127.0.0.1:36245 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-42441024-172.17.0.2-1733349115130 (Datanode Uuid 442bddc6-ad31-4d5d-9f07-1a08e8561321) service to localhost/127.0.0.1:36245 2024-12-04T21:52:33,896 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:52:33,897 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/data/data3/current/BP-42441024-172.17.0.2-1733349115130 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:52:33,897 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/data/data4/current/BP-42441024-172.17.0.2-1733349115130 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:52:33,897 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:52:33,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5741d7ee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:52:33,899 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d2ea6ab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:52:33,899 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:52:33,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46455419{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:52:33,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d5d7e10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.log.dir/,STOPPED} 2024-12-04T21:52:33,900 WARN [BP-42441024-172.17.0.2-1733349115130 heartbeating to localhost/127.0.0.1:36245 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-04T21:52:33,900 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-04T21:52:33,900 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-04T21:52:33,900 WARN [BP-42441024-172.17.0.2-1733349115130 heartbeating to localhost/127.0.0.1:36245 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-42441024-172.17.0.2-1733349115130 (Datanode Uuid 890c857c-9d13-43e5-993d-2d277d675b37) service to localhost/127.0.0.1:36245 2024-12-04T21:52:33,901 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/data/data1/current/BP-42441024-172.17.0.2-1733349115130 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:52:33,901 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/cluster_bc47a72b-80cd-11a8-ac9b-161ccd494d85/data/data2/current/BP-42441024-172.17.0.2-1733349115130 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-04T21:52:33,901 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-04T21:52:33,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4aca7f10{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:52:33,906 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6deead78{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-04T21:52:33,906 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-04T21:52:33,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@367e9406{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-04T21:52:33,906 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16f3519b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.log.dir/,STOPPED} 2024-12-04T21:52:33,912 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-04T21:52:33,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-04T21:52:33,944 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 208) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36245 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36245 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36245 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:36245 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36245 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36245 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36245 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36245 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=137 (was 88) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2324 (was 2155) - AvailableMemoryMB LEAK? 
- 2024-12-04T21:52:33,951 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=137, ProcessCount=11, AvailableMemoryMB=2324 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.log.dir so I do NOT create it in target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0b900d76-9db5-3ce7-cfab-c7ec8f4501ae/hadoop.tmp.dir so I do NOT create it in target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54, deleteOnExit=true 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/test.cache.data in system properties and HBase conf 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/hadoop.tmp.dir in system properties and HBase conf 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/hadoop.log.dir in system properties and HBase conf 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-04T21:52:33,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-04T21:52:33,952 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/nfs.dump.dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/java.io.tmpdir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-04T21:52:33,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-04T21:52:33,965 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:52:34,000 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:52:34,003 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:52:34,004 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:52:34,004 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:52:34,004 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:52:34,005 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:52:34,005 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b70ef0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:52:34,005 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5815605{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:52:34,010 INFO [regionserver/bb3046a53f79:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:52:34,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:34,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:34,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ba22fb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/java.io.tmpdir/jetty-localhost-44185-hadoop-hdfs-3_4_1-tests_jar-_-any-5154840498078931330/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-04T21:52:34,098 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6789809{HTTP/1.1, (http/1.1)}{localhost:44185} 2024-12-04T21:52:34,098 INFO [Time-limited test {}] server.Server(415): Started @275028ms 2024-12-04T21:52:34,108 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-04T21:52:34,141 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:52:34,143 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:52:34,144 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:52:34,144 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:52:34,144 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-04T21:52:34,145 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e37dbe3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:52:34,145 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6708b3c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:52:34,236 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a060c72{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/java.io.tmpdir/jetty-localhost-44121-hadoop-hdfs-3_4_1-tests_jar-_-any-3264631134245263016/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:52:34,237 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7198cc34{HTTP/1.1, (http/1.1)}{localhost:44121} 2024-12-04T21:52:34,237 INFO [Time-limited test {}] server.Server(415): Started @275167ms 2024-12-04T21:52:34,237 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-04T21:52:34,260 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-04T21:52:34,263 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-04T21:52:34,263 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-04T21:52:34,263 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-04T21:52:34,263 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-04T21:52:34,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ca7b204{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/hadoop.log.dir/,AVAILABLE} 2024-12-04T21:52:34,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b53c208{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-04T21:52:34,290 WARN [Thread-2491 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/data/data1/current/BP-413037955-172.17.0.2-1733349153968/current, will proceed with Du for space computation calculation, 2024-12-04T21:52:34,290 WARN [Thread-2492 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/data/data2/current/BP-413037955-172.17.0.2-1733349153968/current, will proceed with Du for space computation calculation, 2024-12-04T21:52:34,309 WARN [Thread-2470 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-04T21:52:34,311 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd01271ce7dade8e with lease ID 0x259cc35cbee716ee: Processing first storage report for DS-18565509-679d-46e8-b8b4-d5df288db1b2 from datanode DatanodeRegistration(127.0.0.1:33745, datanodeUuid=b23f7eb2-266f-4487-92bd-d972b13c90a1, infoPort=41919, infoSecurePort=0, ipcPort=36071, storageInfo=lv=-57;cid=testClusterID;nsid=1966457774;c=1733349153968) 2024-12-04T21:52:34,311 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd01271ce7dade8e with lease ID 0x259cc35cbee716ee: from storage DS-18565509-679d-46e8-b8b4-d5df288db1b2 node DatanodeRegistration(127.0.0.1:33745, datanodeUuid=b23f7eb2-266f-4487-92bd-d972b13c90a1, infoPort=41919, infoSecurePort=0, ipcPort=36071, storageInfo=lv=-57;cid=testClusterID;nsid=1966457774;c=1733349153968), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:52:34,312 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd01271ce7dade8e with lease ID 0x259cc35cbee716ee: Processing first storage report for DS-85895a0e-0892-4e0c-8fc8-286eee68ee07 from datanode DatanodeRegistration(127.0.0.1:33745, datanodeUuid=b23f7eb2-266f-4487-92bd-d972b13c90a1, infoPort=41919, infoSecurePort=0, ipcPort=36071, storageInfo=lv=-57;cid=testClusterID;nsid=1966457774;c=1733349153968) 2024-12-04T21:52:34,312 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd01271ce7dade8e with lease ID 0x259cc35cbee716ee: from storage DS-85895a0e-0892-4e0c-8fc8-286eee68ee07 node DatanodeRegistration(127.0.0.1:33745, datanodeUuid=b23f7eb2-266f-4487-92bd-d972b13c90a1, infoPort=41919, infoSecurePort=0, ipcPort=36071, storageInfo=lv=-57;cid=testClusterID;nsid=1966457774;c=1733349153968), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:52:34,357 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49e8762f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/java.io.tmpdir/jetty-localhost-37553-hadoop-hdfs-3_4_1-tests_jar-_-any-9814127269153715178/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-04T21:52:34,357 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@695357ac{HTTP/1.1, (http/1.1)}{localhost:37553} 2024-12-04T21:52:34,357 INFO [Time-limited test {}] server.Server(415): Started @275287ms 2024-12-04T21:52:34,358 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
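The DataNode registrations and block reports above belong to the mini DFS cluster started for this test; the StartMiniClusterOption logged at the start of the run (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1) corresponds roughly to a test setup like the sketch below. The builder-method names are assumed from the option fields shown in the log and are not verified against this exact HBase snapshot.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option string logged by HBaseTestingUtil(805):
    // numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();

    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, a master and a region server
    try {
      // ... exercise the cluster, e.g. roll WALs as TestLogRolling does ...
    } finally {
      util.shutdownMiniCluster();    // tears everything down; deleteOnExit cleans the data dirs
    }
  }
}
```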
2024-12-04T21:52:34,410 WARN [Thread-2517 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/data/data3/current/BP-413037955-172.17.0.2-1733349153968/current, will proceed with Du for space computation calculation, 2024-12-04T21:52:34,410 WARN [Thread-2518 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/data/data4/current/BP-413037955-172.17.0.2-1733349153968/current, will proceed with Du for space computation calculation, 2024-12-04T21:52:34,424 WARN [Thread-2506 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-04T21:52:34,426 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x27a574fc3622e69e with lease ID 0x259cc35cbee716ef: Processing first storage report for DS-1ffbd2fc-0352-4d1f-97f5-6e23b65508aa from datanode DatanodeRegistration(127.0.0.1:44073, datanodeUuid=0acafd75-3d61-4c39-995b-2e10969034eb, infoPort=40093, infoSecurePort=0, ipcPort=36043, storageInfo=lv=-57;cid=testClusterID;nsid=1966457774;c=1733349153968) 2024-12-04T21:52:34,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x27a574fc3622e69e with lease ID 0x259cc35cbee716ef: from storage DS-1ffbd2fc-0352-4d1f-97f5-6e23b65508aa node DatanodeRegistration(127.0.0.1:44073, datanodeUuid=0acafd75-3d61-4c39-995b-2e10969034eb, infoPort=40093, infoSecurePort=0, ipcPort=36043, storageInfo=lv=-57;cid=testClusterID;nsid=1966457774;c=1733349153968), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:52:34,426 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x27a574fc3622e69e with lease ID 0x259cc35cbee716ef: Processing first storage report for DS-d7d667af-66c8-4075-a9c5-5637b7df745a from datanode DatanodeRegistration(127.0.0.1:44073, datanodeUuid=0acafd75-3d61-4c39-995b-2e10969034eb, infoPort=40093, infoSecurePort=0, ipcPort=36043, storageInfo=lv=-57;cid=testClusterID;nsid=1966457774;c=1733349153968) 2024-12-04T21:52:34,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x27a574fc3622e69e with lease ID 0x259cc35cbee716ef: from storage DS-d7d667af-66c8-4075-a9c5-5637b7df745a node DatanodeRegistration(127.0.0.1:44073, datanodeUuid=0acafd75-3d61-4c39-995b-2e10969034eb, infoPort=40093, infoSecurePort=0, ipcPort=36043, storageInfo=lv=-57;cid=testClusterID;nsid=1966457774;c=1733349153968), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-04T21:52:34,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:34,477 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a 2024-12-04T21:52:34,479 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/zookeeper_0, clientPort=52101, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-04T21:52:34,480 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52101 2024-12-04T21:52:34,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:52:34,481 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:52:34,489 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:52:34,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741825_1001 (size=7) 2024-12-04T21:52:34,490 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e with version=8 2024-12-04T21:52:34,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44629/user/jenkins/test-data/4a3fa863-b223-335f-6eff-a027bd9b38be/hbase-staging 2024-12-04T21:52:34,493 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:52:34,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:52:34,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:52:34,493 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:52:34,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:52:34,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:52:34,494 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-04T21:52:34,494 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:52:34,494 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37307 2024-12-04T21:52:34,496 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37307 connecting to ZooKeeper ensemble=127.0.0.1:52101 2024-12-04T21:52:34,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:373070x0, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:52:34,500 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37307-0x100a7387b1d0000 connected 2024-12-04T21:52:34,514 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:52:34,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:52:34,517 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37307-0x100a7387b1d0000, 
quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:52:34,517 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e, hbase.cluster.distributed=false 2024-12-04T21:52:34,519 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:52:34,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37307 2024-12-04T21:52:34,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37307 2024-12-04T21:52:34,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37307 2024-12-04T21:52:34,520 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37307 2024-12-04T21:52:34,520 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37307 2024-12-04T21:52:34,533 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bb3046a53f79:0 server-side Connection retries=45 2024-12-04T21:52:34,533 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:52:34,533 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-04T21:52:34,533 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-04T21:52:34,533 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-04T21:52:34,533 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-04T21:52:34,533 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-04T21:52:34,533 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-04T21:52:34,534 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35315 2024-12-04T21:52:34,534 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35315 connecting to ZooKeeper ensemble=127.0.0.1:52101 2024-12-04T21:52:34,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:52:34,536 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:52:34,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:353150x0, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-04T21:52:34,540 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35315-0x100a7387b1d0001 connected 2024-12-04T21:52:34,540 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:52:34,540 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-04T21:52:34,540 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-04T21:52:34,541 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-04T21:52:34,542 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-04T21:52:34,542 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35315 2024-12-04T21:52:34,542 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35315 2024-12-04T21:52:34,542 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35315 2024-12-04T21:52:34,543 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35315 2024-12-04T21:52:34,543 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35315 2024-12-04T21:52:34,553 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bb3046a53f79:37307 2024-12-04T21:52:34,554 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bb3046a53f79,37307,1733349154493 2024-12-04T21:52:34,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:52:34,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:52:34,555 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bb3046a53f79,37307,1733349154493 2024-12-04T21:52:34,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, 
quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-04T21:52:34,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,556 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-04T21:52:34,556 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bb3046a53f79,37307,1733349154493 from backup master directory 2024-12-04T21:52:34,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bb3046a53f79,37307,1733349154493 2024-12-04T21:52:34,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:52:34,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-04T21:52:34,557 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
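The ZKUtil/ZKWatcher lines above reflect the standard ZooKeeper pattern behind master registration: an exists() call registers a watch even when the znode is absent, and the client later receives a NodeCreated (or NodeDeleted / NodeChildrenChanged) event once the path changes. A minimal sketch with the plain ZooKeeper client, reusing the /hbase/running path and the quorum string from the log; session handling and retries are omitted.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    // Quorum string taken from the log (127.0.0.1:52101); any reachable ensemble works.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:52101", 30_000, event -> { });

    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());

    // exists() registers the watch even though /hbase/running may not exist yet,
    // which is what "Set watcher on znode that does not yet exist" records.
    if (zk.exists("/hbase/running", watcher) == null) {
      System.out.println("/hbase/running not created yet; waiting for NodeCreated event");
    }
    // zk.close() omitted here so the watch can still fire in this sketch.
  }
}
```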
2024-12-04T21:52:34,557 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bb3046a53f79,37307,1733349154493 2024-12-04T21:52:34,560 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/hbase.id] with ID: 115b6df5-afa4-4bbc-af29-124e151596a7 2024-12-04T21:52:34,560 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/.tmp/hbase.id 2024-12-04T21:52:34,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:52:34,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741826_1002 (size=42) 2024-12-04T21:52:34,565 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/.tmp/hbase.id]:[hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/hbase.id] 2024-12-04T21:52:34,576 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:52:34,576 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-04T21:52:34,577 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
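The FSUtils messages above describe the usual write-to-temp-then-rename pattern for publishing a small file such as hbase.id: the ID is written under .tmp and then moved into place so readers never see a half-written file. A minimal sketch of that pattern with the public Hadoop FileSystem API; the paths are shortened stand-ins and the FSUtils internals may differ.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/hbase");                 // stands in for the rootdir from the log
    Path tmpId   = new Path(rootDir, ".tmp/hbase.id"); // write here first
    Path id      = new Path(rootDir, "hbase.id");      // final location readers look at

    // 1. Write the cluster ID to the temporary location.
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write("115b6df5-afa4-4bbc-af29-124e151596a7".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Move it into place; a failed rename is surfaced instead of leaving a stale temp file.
    if (!fs.rename(tmpId, id)) {
      throw new java.io.IOException("rename " + tmpId + " -> " + id + " failed");
    }
  }
}
```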
2024-12-04T21:52:34,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:52:34,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741827_1003 (size=196) 2024-12-04T21:52:34,584 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-04T21:52:34,585 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-04T21:52:34,585 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:52:34,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:52:34,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741828_1004 (size=1189) 2024-12-04T21:52:34,592 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store 2024-12-04T21:52:34,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:52:34,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741829_1005 (size=34) 2024-12-04T21:52:34,597 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:52:34,597 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-04T21:52:34,598 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:52:34,598 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:52:34,598 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-04T21:52:34,598 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-04T21:52:34,598 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
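The master:store descriptor printed above is an ordinary HBase table descriptor with families info, proc, rs and state. For reference, a schema like the 'info' family in that listing would be expressed with the public client builders roughly as follows; the real MasterRegion builds this internally, and the sketch only mirrors the attributes shown in the log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family from the logged descriptor:
    // VERSIONS=3, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL,
    // IN_MEMORY=true, BLOCKSIZE=8192
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
        // 'proc', 'rs' and 'state' keep the defaults shown in the log (VERSIONS=1, ROW bloom).
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();

    System.out.println(desc);
  }
}
```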
2024-12-04T21:52:34,598 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733349154597Disabling compacts and flushes for region at 1733349154597Disabling writes for close at 1733349154598 (+1 ms)Writing region close event to WAL at 1733349154598Closed at 1733349154598 2024-12-04T21:52:34,598 WARN [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/.initializing 2024-12-04T21:52:34,598 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/WALs/bb3046a53f79,37307,1733349154493 2024-12-04T21:52:34,601 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C37307%2C1733349154493, suffix=, logDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/WALs/bb3046a53f79,37307,1733349154493, archiveDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/oldWALs, maxLogs=10 2024-12-04T21:52:34,601 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C37307%2C1733349154493.1733349154601 2024-12-04T21:52:34,605 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/WALs/bb3046a53f79,37307,1733349154493/bb3046a53f79%2C37307%2C1733349154493.1733349154601 2024-12-04T21:52:34,605 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41919:41919),(127.0.0.1/127.0.0.1:40093:40093)] 2024-12-04T21:52:34,606 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:52:34,606 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:52:34,606 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,606 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,607 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-04T21:52:34,608 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:34,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:52:34,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,609 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-04T21:52:34,609 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:34,609 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:52:34,609 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-04T21:52:34,610 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:34,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:52:34,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-04T21:52:34,611 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:34,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-04T21:52:34,612 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,612 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,612 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,613 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,613 DEBUG [master/bb3046a53f79:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,614 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-04T21:52:34,615 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-04T21:52:34,617 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:52:34,617 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755008, jitterRate=-0.03995878994464874}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-04T21:52:34,618 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733349154606Initializing all the Stores at 1733349154607 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349154607Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349154607Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349154607Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349154607Cleaning up temporary data from old regions at 1733349154613 (+6 ms)Region opened successfully at 1733349154618 (+5 ms) 2024-12-04T21:52:34,618 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-04T21:52:34,621 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@191a5cb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:52:34,622 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-04T21:52:34,622 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-04T21:52:34,622 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-04T21:52:34,622 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-04T21:52:34,623 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-04T21:52:34,623 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-04T21:52:34,623 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-04T21:52:34,625 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-04T21:52:34,626 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-04T21:52:34,627 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-04T21:52:34,627 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-04T21:52:34,628 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-04T21:52:34,628 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-04T21:52:34,628 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-04T21:52:34,629 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-04T21:52:34,630 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-04T21:52:34,631 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-04T21:52:34,631 DEBUG 
[master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-04T21:52:34,633 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-04T21:52:34,634 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-04T21:52:34,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:52:34,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-04T21:52:34,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,635 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bb3046a53f79,37307,1733349154493, sessionid=0x100a7387b1d0000, setting cluster-up flag (Was=false) 2024-12-04T21:52:34,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,639 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-04T21:52:34,640 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,37307,1733349154493 2024-12-04T21:52:34,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:34,645 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-04T21:52:34,646 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bb3046a53f79,37307,1733349154493 2024-12-04T21:52:34,647 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-04T21:52:34,649 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-04T21:52:34,649 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-04T21:52:34,649 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-04T21:52:34,649 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bb3046a53f79,37307,1733349154493 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-04T21:52:34,651 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:52:34,651 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:52:34,651 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:52:34,651 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bb3046a53f79:0, corePoolSize=5, maxPoolSize=5 2024-12-04T21:52:34,651 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bb3046a53f79:0, corePoolSize=10, maxPoolSize=10 2024-12-04T21:52:34,651 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,651 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:52:34,651 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bb3046a53f79:0, corePoolSize=1, 
maxPoolSize=1 2024-12-04T21:52:34,652 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733349184652 2024-12-04T21:52:34,652 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-04T21:52:34,652 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-04T21:52:34,652 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-04T21:52:34,652 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-04T21:52:34,652 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-04T21:52:34,652 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-04T21:52:34,652 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:52:34,652 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-04T21:52:34,652 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
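The FlushLargeStoresPolicy record above reports a per-family lower bound of 32.0 M because no hbase.hregion.percolumnfamilyflush.size.lower.bound is set; the value appears to be the flushSize logged by MasterRegionFlusherAndCompactor divided by the four column families of master:store (info, proc, rs, state). A minimal arithmetic sketch in plain Python, illustrative only and not HBase code:

    flush_size = 134217728    # flushSize logged above (128 MB)
    families = 4              # master:store column families: info, proc, rs, state
    lower_bound = flush_size // families
    print(lower_bound)        # 33554432 bytes = 32 MB, matching flushSizeLowerBound above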
2024-12-04T21:52:34,653 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-04T21:52:34,653 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-04T21:52:34,653 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-04T21:52:34,653 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:34,653 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-04T21:52:34,653 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-04T21:52:34,653 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349154653,5,FailOnTimeoutGroup] 2024-12-04T21:52:34,653 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-04T21:52:34,653 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349154653,5,FailOnTimeoutGroup] 2024-12-04T21:52:34,653 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,653 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-04T21:52:34,654 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,654 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:52:34,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741831_1007 (size=1321) 2024-12-04T21:52:34,659 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-04T21:52:34,659 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e 2024-12-04T21:52:34,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:52:34,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741832_1008 (size=32) 2024-12-04T21:52:34,665 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:52:34,666 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:52:34,667 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:52:34,667 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:34,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:52:34,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:52:34,669 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:52:34,669 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:34,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:52:34,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:52:34,670 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:52:34,670 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:34,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:52:34,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:52:34,672 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:52:34,672 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:34,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:52:34,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:52:34,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740 2024-12-04T21:52:34,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740 2024-12-04T21:52:34,674 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:52:34,674 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:52:34,675 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T21:52:34,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:52:34,677 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-04T21:52:34,678 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795935, jitterRate=0.012084335088729858}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:52:34,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733349154665Initializing all the Stores at 1733349154666 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349154666Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349154666Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349154666Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349154666Cleaning up temporary data from old regions at 1733349154674 (+8 ms)Region opened successfully at 1733349154678 (+4 ms) 2024-12-04T21:52:34,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:52:34,678 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:52:34,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:52:34,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:52:34,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:52:34,679 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:52:34,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733349154678Disabling compacts and flushes for region at 
1733349154678Disabling writes for close at 1733349154679 (+1 ms)Writing region close event to WAL at 1733349154679Closed at 1733349154679 2024-12-04T21:52:34,680 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:52:34,680 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-04T21:52:34,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-04T21:52:34,681 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:52:34,682 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-04T21:52:34,746 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(746): ClusterId : 115b6df5-afa4-4bbc-af29-124e151596a7 2024-12-04T21:52:34,746 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-04T21:52:34,749 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-04T21:52:34,749 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-04T21:52:34,752 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-04T21:52:34,752 DEBUG [RS:0;bb3046a53f79:35315 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc964cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bb3046a53f79/172.17.0.2:0 2024-12-04T21:52:34,768 DEBUG [RS:0;bb3046a53f79:35315 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bb3046a53f79:35315 2024-12-04T21:52:34,768 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-04T21:52:34,768 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-04T21:52:34,768 DEBUG [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(832): About to register with Master. 
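The region open and close journals above record each step as an epoch-millisecond timestamp, and the "(+N ms)" annotations are just the difference from the previous step. A minimal sketch in plain Python, illustrative only, using two values logged for region 1588230740:

    opened = 1733349154678    # "Region opened successfully at ..."
    cleaned = 1733349154674   # "Cleaning up temporary data from old regions at ..."
    print(opened - cleaned)   # 4, i.e. the "(+4 ms)" shown in the journal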
2024-12-04T21:52:34,768 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(2659): reportForDuty to master=bb3046a53f79,37307,1733349154493 with port=35315, startcode=1733349154532 2024-12-04T21:52:34,769 DEBUG [RS:0;bb3046a53f79:35315 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-04T21:52:34,770 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45983, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-04T21:52:34,771 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37307 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bb3046a53f79,35315,1733349154532 2024-12-04T21:52:34,771 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37307 {}] master.ServerManager(517): Registering regionserver=bb3046a53f79,35315,1733349154532 2024-12-04T21:52:34,772 DEBUG [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e 2024-12-04T21:52:34,772 DEBUG [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46395 2024-12-04T21:52:34,772 DEBUG [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-04T21:52:34,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-04T21:52:34,774 DEBUG [RS:0;bb3046a53f79:35315 {}] zookeeper.ZKUtil(111): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bb3046a53f79,35315,1733349154532 2024-12-04T21:52:34,774 WARN [RS:0;bb3046a53f79:35315 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-04T21:52:34,774 INFO [RS:0;bb3046a53f79:35315 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:52:34,774 DEBUG [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/bb3046a53f79,35315,1733349154532 2024-12-04T21:52:34,774 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bb3046a53f79,35315,1733349154532] 2024-12-04T21:52:34,777 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-04T21:52:34,778 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-04T21:52:34,778 INFO [RS:0;bb3046a53f79:35315 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-04T21:52:34,778 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
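The MemStoreFlusher record above reports globalMemStoreLimit=880 M with a low mark of 836 M; the low mark is consistent with 95% of the limit. A minimal check in plain Python, illustrative only:

    limit_mb = 880
    low_mark_mb = 836
    print(low_mark_mb / limit_mb)   # 0.95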
2024-12-04T21:52:34,779 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-04T21:52:34,779 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-04T21:52:34,779 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,779 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,779 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bb3046a53f79:0, corePoolSize=2, maxPoolSize=2 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bb3046a53f79:0, corePoolSize=1, maxPoolSize=1 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:52:34,780 DEBUG [RS:0;bb3046a53f79:35315 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bb3046a53f79:0, corePoolSize=3, maxPoolSize=3 2024-12-04T21:52:34,780 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-04T21:52:34,780 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,780 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,780 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,780 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,780 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,35315,1733349154532-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:52:34,794 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-04T21:52:34,794 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,35315,1733349154532-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,794 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,794 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.Replication(171): bb3046a53f79,35315,1733349154532 started 2024-12-04T21:52:34,806 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:34,806 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(1482): Serving as bb3046a53f79,35315,1733349154532, RpcServer on bb3046a53f79/172.17.0.2:35315, sessionid=0x100a7387b1d0001 2024-12-04T21:52:34,806 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-04T21:52:34,806 DEBUG [RS:0;bb3046a53f79:35315 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bb3046a53f79,35315,1733349154532 2024-12-04T21:52:34,806 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,35315,1733349154532' 2024-12-04T21:52:34,806 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-04T21:52:34,807 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-04T21:52:34,807 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-04T21:52:34,807 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-04T21:52:34,807 DEBUG [RS:0;bb3046a53f79:35315 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bb3046a53f79,35315,1733349154532 2024-12-04T21:52:34,807 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bb3046a53f79,35315,1733349154532' 2024-12-04T21:52:34,807 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-04T21:52:34,807 DEBUG 
[RS:0;bb3046a53f79:35315 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-04T21:52:34,808 DEBUG [RS:0;bb3046a53f79:35315 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-04T21:52:34,808 INFO [RS:0;bb3046a53f79:35315 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-04T21:52:34,808 INFO [RS:0;bb3046a53f79:35315 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-04T21:52:34,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:52:34,832 WARN [bb3046a53f79:37307 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-04T21:52:34,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-04T21:52:34,832 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-04T21:52:34,913 INFO [RS:0;bb3046a53f79:35315 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C35315%2C1733349154532, suffix=, logDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/bb3046a53f79,35315,1733349154532, archiveDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/oldWALs, maxLogs=32 2024-12-04T21:52:34,914 INFO [RS:0;bb3046a53f79:35315 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C35315%2C1733349154532.1733349154913 2024-12-04T21:52:34,922 INFO [RS:0;bb3046a53f79:35315 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/bb3046a53f79,35315,1733349154532/bb3046a53f79%2C35315%2C1733349154532.1733349154913 2024-12-04T21:52:34,924 DEBUG [RS:0;bb3046a53f79:35315 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40093:40093),(127.0.0.1/127.0.0.1:41919:41919)] 2024-12-04T21:52:35,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,42727,1733348980042/bb3046a53f79%2C42727%2C1733348980042.1733348980243 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:35,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/WALs/bb3046a53f79,39097,1733348978686/bb3046a53f79%2C39097%2C1733348978686.meta.1733348979893.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:35,082 DEBUG [bb3046a53f79:37307 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-04T21:52:35,083 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bb3046a53f79,35315,1733349154532 2024-12-04T21:52:35,086 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,35315,1733349154532, state=OPENING 2024-12-04T21:52:35,089 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-04T21:52:35,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:35,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:35,093 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-04T21:52:35,093 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,35315,1733349154532}] 2024-12-04T21:52:35,093 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:52:35,093 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:52:35,247 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-04T21:52:35,252 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41391, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-04T21:52:35,257 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-04T21:52:35,257 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:52:35,260 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bb3046a53f79%2C35315%2C1733349154532.meta, suffix=.meta, 
logDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/bb3046a53f79,35315,1733349154532, archiveDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/oldWALs, maxLogs=32 2024-12-04T21:52:35,260 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bb3046a53f79%2C35315%2C1733349154532.meta.1733349155260.meta 2024-12-04T21:52:35,266 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/bb3046a53f79,35315,1733349154532/bb3046a53f79%2C35315%2C1733349154532.meta.1733349155260.meta 2024-12-04T21:52:35,269 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41919:41919),(127.0.0.1/127.0.0.1:40093:40093)] 2024-12-04T21:52:35,273 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-04T21:52:35,273 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-04T21:52:35,273 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-04T21:52:35,273 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
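The meta-region open above loads the MultiRowMutationEndpoint coprocessor from the table descriptor (HTD) of hbase:meta. A minimal sketch of how a region coprocessor is declared on a table descriptor through the public client API; the table name and column family below are illustrative assumptions, not taken from this log:

```java
// Hedged sketch: attach a region coprocessor to a table descriptor, similar in spirit
// to how hbase:meta carries MultiRowMutationEndpoint in its HTD. Table name and family
// are invented for illustration.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class CoprocessorTableSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))                      // assumed table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))  // one family, like meta's 'info'
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
```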
2024-12-04T21:52:35,274 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-04T21:52:35,274 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-04T21:52:35,274 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-04T21:52:35,274 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-04T21:52:35,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-04T21:52:35,276 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-04T21:52:35,276 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:35,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:52:35,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-04T21:52:35,277 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-04T21:52:35,277 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:35,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:52:35,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-04T21:52:35,278 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-04T21:52:35,278 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:35,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-04T21:52:35,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-04T21:52:35,279 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-04T21:52:35,279 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-04T21:52:35,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
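The CompactionConfiguration entries above report ratio 1.200000 with minFilesToCompact:3 and maxFilesToCompact:10 for every column family of 1588230740. A simplified sketch of the ratio test that ratio-based selection applies to a candidate set of store files; this illustrates the idea only and is not the actual ExploringCompactionPolicy code:

```java
// Simplified ratio test (assumption, not HBase's ExploringCompactionPolicy source):
// a candidate set is acceptable only if no single file is larger than the sum of the
// other candidates multiplied by the configured ratio (1.2 in the entries above).
import java.util.List;

final class RatioCheckSketch {
  static boolean withinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates; reject this candidate set
      }
    }
    return true;
  }
}
```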
2024-12-04T21:52:35,280 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-04T21:52:35,280 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740 2024-12-04T21:52:35,281 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740 2024-12-04T21:52:35,282 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-04T21:52:35,282 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-04T21:52:35,282 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-04T21:52:35,283 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-04T21:52:35,284 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873665, jitterRate=0.11092345416545868}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-04T21:52:35,284 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-04T21:52:35,284 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733349155274Writing region info on filesystem at 1733349155274Initializing all the Stores at 1733349155275 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349155275Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349155275Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733349155275Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733349155275Cleaning up temporary data from old regions at 1733349155282 (+7 ms)Running coprocessor post-open hooks at 1733349155284 (+2 ms)Region opened successfully at 1733349155284 2024-12-04T21:52:35,285 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733349155247 2024-12-04T21:52:35,287 DEBUG [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-04T21:52:35,287 INFO [RS_OPEN_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-04T21:52:35,287 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bb3046a53f79,35315,1733349154532 2024-12-04T21:52:35,288 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bb3046a53f79,35315,1733349154532, state=OPEN 2024-12-04T21:52:35,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:52:35,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-04T21:52:35,290 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bb3046a53f79,35315,1733349154532 2024-12-04T21:52:35,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:52:35,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-04T21:52:35,292 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-04T21:52:35,292 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bb3046a53f79,35315,1733349154532 in 197 msec 2024-12-04T21:52:35,293 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-04T21:52:35,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-12-04T21:52:35,294 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-04T21:52:35,294 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-04T21:52:35,295 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:52:35,295 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,35315,1733349154532, seqNum=-1] 2024-12-04T21:52:35,296 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:52:35,297 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42679, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:52:35,302 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 652 msec 2024-12-04T21:52:35,302 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733349155302, completionTime=-1 2024-12-04T21:52:35,302 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-04T21:52:35,302 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733349215304 2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733349275304 2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,37307,1733349154493-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,37307,1733349154493-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,37307,1733349154493-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bb3046a53f79:37307, period=300000, unit=MILLISECONDS is enabled. 
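Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entry above corresponds to a periodic task handed to a ChoreService. A minimal sketch of that pattern, assuming a hypothetical chore name and a 60 s period like ClusterStatusChore; these are not the master's own chore classes:

```java
// Hedged sketch of the ScheduledChore/ChoreService pattern behind the
// "Chore ScheduledChore ... is enabled" entries. Names and period are assumptions.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

final class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");   // thread-name prefix is an assumption
    service.scheduleChore(new ScheduledChore("DemoChore", stopper, 60_000) {
      @Override protected void chore() {
        // periodic work goes here; runs every 60 s, like ClusterStatusChore above
      }
    });
  }
}
```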
2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:35,304 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-04T21:52:35,306 DEBUG [master/bb3046a53f79:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-04T21:52:35,308 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.751sec 2024-12-04T21:52:35,308 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-04T21:52:35,308 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-04T21:52:35,308 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-04T21:52:35,308 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-04T21:52:35,308 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-04T21:52:35,308 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,37307,1733349154493-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-04T21:52:35,308 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,37307,1733349154493-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-04T21:52:35,310 DEBUG [master/bb3046a53f79:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-04T21:52:35,310 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-04T21:52:35,310 INFO [master/bb3046a53f79:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bb3046a53f79,37307,1733349154493-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
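Both the region server earlier ("Quota support disabled") and the master here report quotas as off, which matches the hbase.quota.enabled switch being left at its default of false. A hedged one-liner showing how that switch would be flipped in a Configuration; the surrounding class and output are illustrative only:

```java
// Hedged sketch: the "Quota support disabled" entries correspond to hbase.quota.enabled
// staying at its default of false. Flipping it is a plain Configuration setting.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class QuotaSwitchSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.quota.enabled", true); // default false, hence the disabled messages
    System.out.println(conf.getBoolean("hbase.quota.enabled", false));
  }
}
```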
2024-12-04T21:52:35,345 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f0aa271, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:52:35,345 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bb3046a53f79,37307,-1 for getting cluster id 2024-12-04T21:52:35,345 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-04T21:52:35,347 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '115b6df5-afa4-4bbc-af29-124e151596a7' 2024-12-04T21:52:35,347 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-04T21:52:35,347 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "115b6df5-afa4-4bbc-af29-124e151596a7" 2024-12-04T21:52:35,348 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1feceb8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:52:35,348 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bb3046a53f79,37307,-1] 2024-12-04T21:52:35,348 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-04T21:52:35,348 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:35,349 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49416, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-04T21:52:35,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@307592bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-04T21:52:35,351 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-04T21:52:35,352 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bb3046a53f79,35315,1733349154532, seqNum=-1] 2024-12-04T21:52:35,352 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-04T21:52:35,354 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46430, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-04T21:52:35,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bb3046a53f79,37307,1733349154493 2024-12-04T21:52:35,356 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-04T21:52:35,359 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-04T21:52:35,360 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-04T21:52:35,362 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/test.com,8080,1, archiveDir=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/oldWALs, maxLogs=32 2024-12-04T21:52:35,362 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733349155362 2024-12-04T21:52:35,367 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733349155362 2024-12-04T21:52:35,369 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40093:40093),(127.0.0.1/127.0.0.1:41919:41919)] 2024-12-04T21:52:35,370 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733349155369 2024-12-04T21:52:35,375 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,375 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,375 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,375 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,375 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,376 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733349155362 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733349155369 2024-12-04T21:52:35,377 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40093:40093),(127.0.0.1/127.0.0.1:41919:41919)] 2024-12-04T21:52:35,377 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733349155362 is not closed yet, will try archiving it next time 2024-12-04T21:52:35,377 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,377 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,377 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741835_1011 (size=93) 2024-12-04T21:52:35,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741835_1011 (size=93) 2024-12-04T21:52:35,378 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,378 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,379 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/WALs/test.com,8080,1/test.com%2C8080%2C1.1733349155362 to hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/oldWALs/test.com%2C8080%2C1.1733349155362 2024-12-04T21:52:35,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741836_1012 (size=93) 2024-12-04T21:52:35,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741836_1012 (size=93) 2024-12-04T21:52:35,383 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/oldWALs 2024-12-04T21:52:35,383 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733349155369) 2024-12-04T21:52:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-04T21:52:35,383 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-04T21:52:35,384 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:52:35,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:35,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:35,384 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-04T21:52:35,384 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-04T21:52:35,384 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=214847488, stopped=false 2024-12-04T21:52:35,384 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bb3046a53f79,37307,1733349154493 2024-12-04T21:52:35,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:52:35,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:35,385 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-04T21:52:35,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-04T21:52:35,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-04T21:52:35,386 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
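The call stack above runs from AbstractTestLogRolling.tearDown through HBaseTestingUtil.shutdownMiniCluster, i.e. the single-master, single-region-server minicluster brought up earlier ("Minicluster is up") is torn down from a JUnit @After method. A sketch of that lifecycle, assuming HBaseTestingUtil follows the familiar HBaseTestingUtility start/stop pattern; the class and method bodies are invented, only shutdownMiniCluster appears in this log:

```java
// Illustrative JUnit 4 skeleton inferred from the call stacks above; an assumption,
// not the actual AbstractTestLogRolling source.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster(); // assumed to start one master and one region server, as in this log
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster(); // the call visible in the tearDown stack trace above
  }
}
```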
2024-12-04T21:52:35,386 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:52:35,386 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:35,386 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'bb3046a53f79,35315,1733349154532' ***** 2024-12-04T21:52:35,386 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-04T21:52:35,386 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:52:35,387 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-04T21:52:35,387 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(959): stopping server bb3046a53f79,35315,1733349154532 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bb3046a53f79:35315. 2024-12-04T21:52:35,387 DEBUG [RS:0;bb3046a53f79:35315 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-04T21:52:35,387 DEBUG [RS:0;bb3046a53f79:35315 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
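The shutdown sequence above deletes /hbase/running, and both the master and region server watchers receive NodeDeleted for it before re-arming a watch on the now-missing znode ("Set watcher on znode that does not yet exist, /hbase/running"). A hedged sketch with the plain ZooKeeper client rather than HBase's ZKWatcher, using the quorum address from this log, of how such a deletion watch behaves:

```java
// Hedged sketch using the raw ZooKeeper client (not HBase's ZKWatcher/ZKUtil):
// a NodeDeleted event on /hbase/running is treated as "cluster is stopping".
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

final class RunningZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:52101", 30_000, event -> { }); // quorum from this log
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        System.out.println("cluster shutdown requested");
      }
    };
    // exists() both checks the znode and arms the watch, even when the node is absent
    zk.exists("/hbase/running", watcher);
  }
}
```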
2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-04T21:52:35,387 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-04T21:52:35,387 DEBUG [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-04T21:52:35,387 DEBUG [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-04T21:52:35,387 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-04T21:52:35,388 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-04T21:52:35,388 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-04T21:52:35,388 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-04T21:52:35,388 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-04T21:52:35,388 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-04T21:52:35,400 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740/.tmp/ns/fdef8abd6a064243adaa45d6a66ab7e9 is 43, key is default/ns:d/1733349155297/Put/seqid=0 2024-12-04T21:52:35,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741837_1013 (size=5153) 2024-12-04T21:52:35,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741837_1013 (size=5153) 2024-12-04T21:52:35,405 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740/.tmp/ns/fdef8abd6a064243adaa45d6a66ab7e9 2024-12-04T21:52:35,411 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740/.tmp/ns/fdef8abd6a064243adaa45d6a66ab7e9 as hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740/ns/fdef8abd6a064243adaa45d6a66ab7e9 2024-12-04T21:52:35,415 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740/ns/fdef8abd6a064243adaa45d6a66ab7e9, entries=2, sequenceid=6, filesize=5.0 K 2024-12-04T21:52:35,416 INFO 
[RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false 2024-12-04T21:52:35,419 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-04T21:52:35,420 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-04T21:52:35,420 INFO [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-04T21:52:35,420 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733349155387Running coprocessor pre-close hooks at 1733349155387Disabling compacts and flushes for region at 1733349155387Disabling writes for close at 1733349155388 (+1 ms)Obtaining lock to block concurrent updates at 1733349155388Preparing flush snapshotting stores in 1588230740 at 1733349155388Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733349155388Flushing stores of hbase:meta,,1.1588230740 at 1733349155388Flushing 1588230740/ns: creating writer at 1733349155389 (+1 ms)Flushing 1588230740/ns: appending metadata at 1733349155400 (+11 ms)Flushing 1588230740/ns: closing flushed file at 1733349155400Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31dadca5: reopening flushed file at 1733349155410 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false at 1733349155416 (+6 ms)Writing region close event to WAL at 1733349155416Running coprocessor post-close hooks at 1733349155420 (+4 ms)Closed at 1733349155420 2024-12-04T21:52:35,420 DEBUG [RS_CLOSE_META-regionserver/bb3046a53f79:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-04T21:52:35,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38693/user/jenkins/test-data/3512449c-f80c-b0a6-3b0f-6c6dcdd8fe9a/MasterData/WALs/bb3046a53f79,38967,1733348978643/bb3046a53f79%2C38967%2C1733348978643.1733348979193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-04T21:52:35,588 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(976): stopping server bb3046a53f79,35315,1733349154532; all regions closed. 
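This is the same "Failed invocation ... Filesystem closed" pattern seen twice earlier: lease recovery probes isFileClosed reflectively, and because the underlying DFSClient has already been shut down, the reflective call surfaces an InvocationTargetException wrapping IOException("Filesystem closed"). A hedged reconstruction of that probe, an assumption about the shape of the call rather than the actual RecoverLeaseFSUtils source:

```java
// Hedged reconstruction of the reflective isFileClosed probe whose failure is logged above.
// DistributedFileSystem.isFileClosed(Path) is a real HDFS client method; the wrapper
// class and error handling here are assumptions for illustration.
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbeSketch {
  static boolean probe(FileSystem fs, Path walPath) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, walPath);
    } catch (ReflectiveOperationException e) {
      // InvocationTargetException wraps the underlying IOException("Filesystem closed"),
      // exactly as in the stack traces above
      return false;
    }
  }
}
```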
2024-12-04T21:52:35,589 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,589 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,590 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,590 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,590 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741834_1010 (size=1152) 2024-12-04T21:52:35,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741834_1010 (size=1152) 2024-12-04T21:52:35,599 DEBUG [RS:0;bb3046a53f79:35315 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/oldWALs 2024-12-04T21:52:35,599 INFO [RS:0;bb3046a53f79:35315 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C35315%2C1733349154532.meta:.meta(num 1733349155260) 2024-12-04T21:52:35,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,600 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,600 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,600 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,600 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-04T21:52:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741833_1009 (size=93) 2024-12-04T21:52:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741833_1009 (size=93) 2024-12-04T21:52:35,604 DEBUG [RS:0;bb3046a53f79:35315 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/oldWALs 2024-12-04T21:52:35,604 INFO [RS:0;bb3046a53f79:35315 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bb3046a53f79%2C35315%2C1733349154532:(num 1733349154913) 2024-12-04T21:52:35,604 DEBUG [RS:0;bb3046a53f79:35315 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-04T21:52:35,604 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.LeaseManager(133): Closed leases 2024-12-04T21:52:35,604 INFO [RS:0;bb3046a53f79:35315 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-04T21:52:35,604 INFO [RS:0;bb3046a53f79:35315 {}] hbase.ChoreService(370): Chore service for: regionserver/bb3046a53f79:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-04T21:52:35,604 INFO [RS:0;bb3046a53f79:35315 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-04T21:52:35,605 INFO [regionserver/bb3046a53f79:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
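The "Moved 1 WAL file(s) to .../oldWALs" entries above record closed WAL files being archived out of the WALs directory. At its simplest this amounts to a rename into the archive directory on the same filesystem; the sketch below shows only that idea with made-up paths, not AbstractFSWAL's actual archiving logic:

```java
// Minimal sketch (assumption): archiving a closed WAL as a rename into oldWALs.
// Paths are invented for illustration and do not come from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class WalArchiveSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path wal = new Path("/user/jenkins/test-data/WALs/example-wal");       // illustrative source
    Path archived = new Path("/user/jenkins/test-data/oldWALs/example-wal"); // illustrative target
    if (!fs.rename(wal, archived)) {
      throw new java.io.IOException("failed to archive " + wal);
    }
  }
}
```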
2024-12-04T21:52:35,605 INFO [RS:0;bb3046a53f79:35315 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35315
2024-12-04T21:52:35,606 INFO [RS:0;bb3046a53f79:35315 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-04T21:52:35,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-04T21:52:35,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bb3046a53f79,35315,1733349154532
2024-12-04T21:52:35,607 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bb3046a53f79,35315,1733349154532]
2024-12-04T21:52:35,607 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bb3046a53f79,35315,1733349154532 already deleted, retry=false
2024-12-04T21:52:35,607 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bb3046a53f79,35315,1733349154532 expired; onlineServers=0
2024-12-04T21:52:35,607 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bb3046a53f79,37307,1733349154493' *****
2024-12-04T21:52:35,607 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-04T21:52:35,607 INFO [M:0;bb3046a53f79:37307 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-04T21:52:35,607 INFO [M:0;bb3046a53f79:37307 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-04T21:52:35,608 DEBUG [M:0;bb3046a53f79:37307 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-04T21:52:35,608 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-04T21:52:35,608 DEBUG [M:0;bb3046a53f79:37307 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-04T21:52:35,608 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349154653 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.large.0-1733349154653,5,FailOnTimeoutGroup]
2024-12-04T21:52:35,608 DEBUG [master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349154653 {}] cleaner.HFileCleaner(306): Exit Thread[master/bb3046a53f79:0:becomeActiveMaster-HFileCleaner.small.0-1733349154653,5,FailOnTimeoutGroup]
2024-12-04T21:52:35,608 INFO [M:0;bb3046a53f79:37307 {}] hbase.ChoreService(370): Chore service for: master/bb3046a53f79:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-04T21:52:35,608 INFO [M:0;bb3046a53f79:37307 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-04T21:52:35,608 DEBUG [M:0;bb3046a53f79:37307 {}] master.HMaster(1795): Stopping service threads
2024-12-04T21:52:35,608 INFO [M:0;bb3046a53f79:37307 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-04T21:52:35,608 INFO [M:0;bb3046a53f79:37307 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-04T21:52:35,608 INFO [M:0;bb3046a53f79:37307 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-04T21:52:35,608 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-04T21:52:35,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-04T21:52:35,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-04T21:52:35,608 DEBUG [M:0;bb3046a53f79:37307 {}] zookeeper.ZKUtil(347): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-04T21:52:35,609 WARN [M:0;bb3046a53f79:37307 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-04T21:52:35,609 INFO [M:0;bb3046a53f79:37307 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/.lastflushedseqids
2024-12-04T21:52:35,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741838_1014 (size=99)
2024-12-04T21:52:35,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741838_1014 (size=99)
2024-12-04T21:52:35,619 INFO [M:0;bb3046a53f79:37307 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-04T21:52:35,619 INFO [M:0;bb3046a53f79:37307 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-04T21:52:35,619 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-04T21:52:35,619 INFO [M:0;bb3046a53f79:37307 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T21:52:35,619 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T21:52:35,619 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-04T21:52:35,619 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T21:52:35,619 INFO [M:0;bb3046a53f79:37307 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-12-04T21:52:35,632 DEBUG [M:0;bb3046a53f79:37307 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4b3adc92a60047049317278767f797f8 is 82, key is hbase:meta,,1/info:regioninfo/1733349155287/Put/seqid=0
2024-12-04T21:52:35,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741839_1015 (size=5672)
2024-12-04T21:52:35,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741839_1015 (size=5672)
2024-12-04T21:52:35,637 INFO [M:0;bb3046a53f79:37307 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4b3adc92a60047049317278767f797f8
2024-12-04T21:52:35,653 DEBUG [M:0;bb3046a53f79:37307 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d0a6faa92bed4a019d1ef92a9ccfb9e8 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733349155301/Put/seqid=0
2024-12-04T21:52:35,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741840_1016 (size=5275)
2024-12-04T21:52:35,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741840_1016 (size=5275)
2024-12-04T21:52:35,657 INFO [M:0;bb3046a53f79:37307 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d0a6faa92bed4a019d1ef92a9ccfb9e8
2024-12-04T21:52:35,673 DEBUG [M:0;bb3046a53f79:37307 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0db7849341a14932ab133e37bf9af020 is 69, key is bb3046a53f79,35315,1733349154532/rs:state/1733349154771/Put/seqid=0
2024-12-04T21:52:35,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741841_1017 (size=5156)
2024-12-04T21:52:35,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741841_1017 (size=5156)
2024-12-04T21:52:35,678 INFO [M:0;bb3046a53f79:37307 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0db7849341a14932ab133e37bf9af020
2024-12-04T21:52:35,694 DEBUG [M:0;bb3046a53f79:37307 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f0a0bed9b67470691cd0c74890653a9 is 52, key is load_balancer_on/state:d/1733349155358/Put/seqid=0
2024-12-04T21:52:35,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741842_1018 (size=5056)
2024-12-04T21:52:35,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741842_1018 (size=5056)
2024-12-04T21:52:35,698 INFO [M:0;bb3046a53f79:37307 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f0a0bed9b67470691cd0c74890653a9
2024-12-04T21:52:35,702 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4b3adc92a60047049317278767f797f8 as hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4b3adc92a60047049317278767f797f8
2024-12-04T21:52:35,706 INFO [M:0;bb3046a53f79:37307 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4b3adc92a60047049317278767f797f8, entries=8, sequenceid=29, filesize=5.5 K
2024-12-04T21:52:35,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T21:52:35,707 INFO [RS:0;bb3046a53f79:35315 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-04T21:52:35,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35315-0x100a7387b1d0001, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T21:52:35,707 INFO [RS:0;bb3046a53f79:35315 {}] regionserver.HRegionServer(1031): Exiting; stopping=bb3046a53f79,35315,1733349154532; zookeeper connection closed.
2024-12-04T21:52:35,707 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d0a6faa92bed4a019d1ef92a9ccfb9e8 as hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d0a6faa92bed4a019d1ef92a9ccfb9e8
2024-12-04T21:52:35,707 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2cd82713 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2cd82713
2024-12-04T21:52:35,707 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-04T21:52:35,711 INFO [M:0;bb3046a53f79:37307 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d0a6faa92bed4a019d1ef92a9ccfb9e8, entries=3, sequenceid=29, filesize=5.2 K
2024-12-04T21:52:35,711 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0db7849341a14932ab133e37bf9af020 as hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0db7849341a14932ab133e37bf9af020
2024-12-04T21:52:35,716 INFO [M:0;bb3046a53f79:37307 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0db7849341a14932ab133e37bf9af020, entries=1, sequenceid=29, filesize=5.0 K
2024-12-04T21:52:35,717 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f0a0bed9b67470691cd0c74890653a9 as hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4f0a0bed9b67470691cd0c74890653a9
2024-12-04T21:52:35,721 INFO [M:0;bb3046a53f79:37307 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46395/user/jenkins/test-data/83f0e2df-8e66-3d10-e614-5db2f4c5ae8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4f0a0bed9b67470691cd0c74890653a9, entries=1, sequenceid=29, filesize=4.9 K
2024-12-04T21:52:35,722 INFO [M:0;bb3046a53f79:37307 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 103ms, sequenceid=29, compaction requested=false
2024-12-04T21:52:35,723 INFO [M:0;bb3046a53f79:37307 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-04T21:52:35,723 DEBUG [M:0;bb3046a53f79:37307 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733349155619Disabling compacts and flushes for region at 1733349155619Disabling writes for close at 1733349155619Obtaining lock to block concurrent updates at 1733349155619Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733349155619Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733349155619Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733349155620 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733349155620Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733349155632 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733349155632Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733349155640 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733349155652 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733349155652Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733349155660 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733349155673 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733349155673Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733349155681 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733349155694 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733349155694Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d7c6171: reopening flushed file at 1733349155702 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67d3d669: reopening flushed file at 1733349155706 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@818f2c1: reopening flushed file at 1733349155711 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3552c2f4: reopening flushed file at 1733349155716 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 103ms, sequenceid=29, compaction requested=false at 1733349155722 (+6 ms)Writing region close event to WAL at 1733349155723 (+1 ms)Closed at 1733349155723
2024-12-04T21:52:35,724 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:52:35,724 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:52:35,724 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:52:35,724 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:52:35,724 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-04T21:52:35,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44073 is added to blk_1073741830_1006 (size=10311)
2024-12-04T21:52:35,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33745 is added to blk_1073741830_1006 (size=10311)
2024-12-04T21:52:35,726 INFO [M:0;bb3046a53f79:37307 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-04T21:52:35,726 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-04T21:52:35,727 INFO [M:0;bb3046a53f79:37307 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37307
2024-12-04T21:52:35,727 INFO [M:0;bb3046a53f79:37307 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-04T21:52:35,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T21:52:35,828 INFO [M:0;bb3046a53f79:37307 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-04T21:52:35,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37307-0x100a7387b1d0000, quorum=127.0.0.1:52101, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-04T21:52:35,834 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49e8762f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-04T21:52:35,834 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@695357ac{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T21:52:35,834 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T21:52:35,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b53c208{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T21:52:35,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ca7b204{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/hadoop.log.dir/,STOPPED}
2024-12-04T21:52:35,836 WARN [BP-413037955-172.17.0.2-1733349153968 heartbeating to localhost/127.0.0.1:46395 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-04T21:52:35,836 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-04T21:52:35,836 WARN [BP-413037955-172.17.0.2-1733349153968 heartbeating to localhost/127.0.0.1:46395 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-413037955-172.17.0.2-1733349153968 (Datanode Uuid 0acafd75-3d61-4c39-995b-2e10969034eb) service to localhost/127.0.0.1:46395
2024-12-04T21:52:35,836 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-04T21:52:35,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/data/data3/current/BP-413037955-172.17.0.2-1733349153968 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T21:52:35,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/data/data4/current/BP-413037955-172.17.0.2-1733349153968 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T21:52:35,837 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-04T21:52:35,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a060c72{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-04T21:52:35,840 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7198cc34{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T21:52:35,840 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T21:52:35,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6708b3c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T21:52:35,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e37dbe3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/hadoop.log.dir/,STOPPED}
2024-12-04T21:52:35,841 WARN [BP-413037955-172.17.0.2-1733349153968 heartbeating to localhost/127.0.0.1:46395 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-04T21:52:35,841 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-04T21:52:35,841 WARN [BP-413037955-172.17.0.2-1733349153968 heartbeating to localhost/127.0.0.1:46395 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-413037955-172.17.0.2-1733349153968 (Datanode Uuid b23f7eb2-266f-4487-92bd-d972b13c90a1) service to localhost/127.0.0.1:46395
2024-12-04T21:52:35,841 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-04T21:52:35,842 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/data/data1/current/BP-413037955-172.17.0.2-1733349153968 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T21:52:35,842 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/cluster_38bb87b5-4d81-69fb-2dae-b3d3e73d1d54/data/data2/current/BP-413037955-172.17.0.2-1733349153968 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-04T21:52:35,842 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-04T21:52:35,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ba22fb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-04T21:52:35,847 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6789809{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-04T21:52:35,847 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-04T21:52:35,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5815605{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-04T21:52:35,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b70ef0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bdf23b43-223f-e2da-2fe3-c220641e4e3a/hadoop.log.dir/,STOPPED}
2024-12-04T21:52:35,853 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-04T21:52:35,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-04T21:52:35,876 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=283 (was 232)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46395
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:46395
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
	app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
	app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:46395
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
	app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
	app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46395
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
	app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
	app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46395 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-2
	java.base@17.0.11/java.lang.Thread.sleep(Native Method)
	app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
	app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
	app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
	app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46395
	java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
	java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
	java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
	app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
	java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46395 from jenkins
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46395 from jenkins.hfs.7
	java.base@17.0.11/java.lang.Object.wait(Native Method)
	app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
	app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
 - Thread LEAK? -, OpenFileDescriptor=561 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=134 (was 137), ProcessCount=11 (was 11), AvailableMemoryMB=2321 (was 2324)